repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/pillar/virtkey.py
ext_pillar
python
def ext_pillar(hyper_id, pillar, name, key): ''' Accept the key for the VM on the hyper, if authorized. ''' vk = salt.utils.virt.VirtKey(hyper_id, name, __opts__) ok = vk.accept(key) pillar['virtkey'] = {name: ok} return {}
Accept the key for the VM on the hyper, if authorized.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/virtkey.py#L21-L28
[ "def accept(self, pub):\n '''\n Accept the provided key\n '''\n try:\n with salt.utils.files.fopen(self.path, 'r') as fp_:\n expiry = int(fp_.read())\n except (OSError, IOError):\n log.error(\n 'Request to sign key for minion \\'%s\\' on hyper \\'%s\\' '\n 'denied: no authorization', self.id, self.hyper\n )\n return False\n except ValueError:\n log.error('Invalid expiry data in %s', self.path)\n return False\n\n # Limit acceptance window to 10 minutes\n # TODO: Move this value to the master config file\n if (time.time() - expiry) > 600:\n log.warning(\n 'Request to sign key for minion \"%s\" on hyper \"%s\" denied: '\n 'authorization expired', self.id, self.hyper\n )\n return False\n\n pubfn = os.path.join(self.opts['pki_dir'],\n 'minions',\n self.id)\n with salt.utils.files.fopen(pubfn, 'w+') as fp_:\n fp_.write(pub)\n self.void()\n return True\n" ]
# -*- coding: utf-8 -*- ''' Accept a key from a hypervisor if the virt runner has already submitted an authorization request ''' from __future__ import absolute_import, print_function, unicode_literals # Don't "fix" the above docstring to put it on two lines, as the sphinx # autosummary pulls only the first line for its description. # Import python libs import logging # Import salt libs import salt.utils.virt # Set up logging log = logging.getLogger(__name__)
saltstack/salt
salt/states/lvm.py
pv_present
python
def pv_present(name, **kwargs): ''' Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvcreate'](name, **kwargs) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Created Physical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Physical Volume {0}'.format(name) ret['result'] = False return ret
Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lvm.py#L43-L74
null
# -*- coding: utf-8 -*- ''' Management of Linux logical volumes =================================== A state module to manage LVMs .. code-block:: yaml /dev/sda: lvm.pv_present my_vg: lvm.vg_present: - devices: /dev/sda lvroot: lvm.lv_present: - vgname: my_vg - size: 10G - stripes: 5 - stripesize: 8K ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import salt libs import salt.utils.path from salt.ext import six def __virtual__(): ''' Only load the module if lvm is installed ''' if salt.utils.path.which('lvm'): return 'lvm' return False def pv_absent(name): ''' Ensure that a Physical Device is not being used by lvm name The device name to initialize. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} does not exist'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvremove'](name) if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name) ret['result'] = False else: ret['comment'] = 'Removed Physical Volume {0}'.format(name) ret['changes']['removed'] = changes return ret def vg_present(name, devices=None, **kwargs): ''' Create an LVM Volume Group name The Volume Group name to create devices A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. 
''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if isinstance(devices, six.string_types): devices = devices.split(',') if __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already present'.format(name) for device in devices: realdev = os.path.realpath(device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs and pvs.get(realdev, None): if pvs[realdev]['Volume Group Name'] == name: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of Volume Group'.format(device)) elif pvs[realdev]['Volume Group Name'] in ['', '#orphans_lvm2']: __salt__['lvm.vgextend'](name, device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs[realdev]['Volume Group Name'] == name: ret['changes'].update( {device: 'added to {0}'.format(name)}) else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} could not be added'.format(device)) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of {1}'.format( device, pvs[realdev]['Volume Group Name'])) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], 'pv {0} is not present'.format(device)) ret['result'] = False elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgcreate'](name, devices, **kwargs) if __salt__['lvm.vgdisplay'](name): ret['comment'] = 'Created Volume Group {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Volume Group {0}'.format(name) ret['result'] = False return ret def vg_absent(name): ''' Remove an LVM volume group name The volume group to remove ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be removed'.format(name) 
ret['result'] = None return ret else: changes = __salt__['lvm.vgremove'](name) if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Removed Volume Group {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Volume Group {0}'.format(name) ret['result'] = False return ret def lv_present(name, vgname=None, size=None, extents=None, snapshot=None, pv='', thinvolume=False, thinpool=False, force=False, **kwargs): ''' Create a new Logical Volume name The name of the Logical Volume vgname The name of the Volume Group on which the Logical Volume resides size The initial size of the Logical Volume extents The number of logical extents to allocate snapshot The name of the snapshot pv The Physical Volume to use kwargs Any supported options to lvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. .. versionadded:: to_complete thinvolume Logical Volume is thinly provisioned thinpool Logical Volume is a thin pool .. versionadded:: 2018.3.0 force Assume yes to all prompts ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} _snapshot = None if snapshot: _snapshot = name name = snapshot if thinvolume: lvpath = '/dev/{0}/{1}'.format(vgname.split('/')[0], name) else: lvpath = '/dev/{0}/{1}'.format(vgname, name) if __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvcreate'](name, vgname, size=size, extents=extents, snapshot=_snapshot, pv=pv, thinvolume=thinvolume, thinpool=thinpool, force=force, **kwargs) if __salt__['lvm.lvdisplay'](lvpath): ret['comment'] = 'Created Logical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Logical Volume {0}. 
Error: {1}'.format(name, changes) ret['result'] = False return ret def lv_absent(name, vgname=None): ''' Remove a given existing Logical Volume from a named existing volume group name The Logical Volume to remove vgname The name of the Volume Group on which the Logical Volume resides ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} lvpath = '/dev/{0}/{1}'.format(vgname, name) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvremove'](name, vgname) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Removed Logical Volume {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name) ret['result'] = False return ret
saltstack/salt
salt/states/lvm.py
pv_absent
python
def pv_absent(name): ''' Ensure that a Physical Device is not being used by lvm name The device name to initialize. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} does not exist'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvremove'](name) if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name) ret['result'] = False else: ret['comment'] = 'Removed Physical Volume {0}'.format(name) ret['changes']['removed'] = changes return ret
Ensure that a Physical Device is not being used by lvm name The device name to initialize.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lvm.py#L77-L104
null
# -*- coding: utf-8 -*- ''' Management of Linux logical volumes =================================== A state module to manage LVMs .. code-block:: yaml /dev/sda: lvm.pv_present my_vg: lvm.vg_present: - devices: /dev/sda lvroot: lvm.lv_present: - vgname: my_vg - size: 10G - stripes: 5 - stripesize: 8K ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import salt libs import salt.utils.path from salt.ext import six def __virtual__(): ''' Only load the module if lvm is installed ''' if salt.utils.path.which('lvm'): return 'lvm' return False def pv_present(name, **kwargs): ''' Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvcreate'](name, **kwargs) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Created Physical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Physical Volume {0}'.format(name) ret['result'] = False return ret def vg_present(name, devices=None, **kwargs): ''' Create an LVM Volume Group name The Volume Group name to create devices A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. 
''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if isinstance(devices, six.string_types): devices = devices.split(',') if __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already present'.format(name) for device in devices: realdev = os.path.realpath(device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs and pvs.get(realdev, None): if pvs[realdev]['Volume Group Name'] == name: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of Volume Group'.format(device)) elif pvs[realdev]['Volume Group Name'] in ['', '#orphans_lvm2']: __salt__['lvm.vgextend'](name, device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs[realdev]['Volume Group Name'] == name: ret['changes'].update( {device: 'added to {0}'.format(name)}) else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} could not be added'.format(device)) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of {1}'.format( device, pvs[realdev]['Volume Group Name'])) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], 'pv {0} is not present'.format(device)) ret['result'] = False elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgcreate'](name, devices, **kwargs) if __salt__['lvm.vgdisplay'](name): ret['comment'] = 'Created Volume Group {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Volume Group {0}'.format(name) ret['result'] = False return ret def vg_absent(name): ''' Remove an LVM volume group name The volume group to remove ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be removed'.format(name) 
ret['result'] = None return ret else: changes = __salt__['lvm.vgremove'](name) if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Removed Volume Group {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Volume Group {0}'.format(name) ret['result'] = False return ret def lv_present(name, vgname=None, size=None, extents=None, snapshot=None, pv='', thinvolume=False, thinpool=False, force=False, **kwargs): ''' Create a new Logical Volume name The name of the Logical Volume vgname The name of the Volume Group on which the Logical Volume resides size The initial size of the Logical Volume extents The number of logical extents to allocate snapshot The name of the snapshot pv The Physical Volume to use kwargs Any supported options to lvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. .. versionadded:: to_complete thinvolume Logical Volume is thinly provisioned thinpool Logical Volume is a thin pool .. versionadded:: 2018.3.0 force Assume yes to all prompts ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} _snapshot = None if snapshot: _snapshot = name name = snapshot if thinvolume: lvpath = '/dev/{0}/{1}'.format(vgname.split('/')[0], name) else: lvpath = '/dev/{0}/{1}'.format(vgname, name) if __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvcreate'](name, vgname, size=size, extents=extents, snapshot=_snapshot, pv=pv, thinvolume=thinvolume, thinpool=thinpool, force=force, **kwargs) if __salt__['lvm.lvdisplay'](lvpath): ret['comment'] = 'Created Logical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Logical Volume {0}. 
Error: {1}'.format(name, changes) ret['result'] = False return ret def lv_absent(name, vgname=None): ''' Remove a given existing Logical Volume from a named existing volume group name The Logical Volume to remove vgname The name of the Volume Group on which the Logical Volume resides ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} lvpath = '/dev/{0}/{1}'.format(vgname, name) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvremove'](name, vgname) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Removed Logical Volume {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name) ret['result'] = False return ret
saltstack/salt
salt/states/lvm.py
vg_present
python
def vg_present(name, devices=None, **kwargs): ''' Create an LVM Volume Group name The Volume Group name to create devices A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if isinstance(devices, six.string_types): devices = devices.split(',') if __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already present'.format(name) for device in devices: realdev = os.path.realpath(device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs and pvs.get(realdev, None): if pvs[realdev]['Volume Group Name'] == name: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of Volume Group'.format(device)) elif pvs[realdev]['Volume Group Name'] in ['', '#orphans_lvm2']: __salt__['lvm.vgextend'](name, device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs[realdev]['Volume Group Name'] == name: ret['changes'].update( {device: 'added to {0}'.format(name)}) else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} could not be added'.format(device)) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of {1}'.format( device, pvs[realdev]['Volume Group Name'])) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], 'pv {0} is not present'.format(device)) ret['result'] = False elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgcreate'](name, devices, **kwargs) if __salt__['lvm.vgdisplay'](name): ret['comment'] = 'Created Volume Group {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Volume Group {0}'.format(name) ret['result'] = False return ret
Create an LVM Volume Group name The Volume Group name to create devices A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lvm.py#L107-L173
null
# -*- coding: utf-8 -*- ''' Management of Linux logical volumes =================================== A state module to manage LVMs .. code-block:: yaml /dev/sda: lvm.pv_present my_vg: lvm.vg_present: - devices: /dev/sda lvroot: lvm.lv_present: - vgname: my_vg - size: 10G - stripes: 5 - stripesize: 8K ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import salt libs import salt.utils.path from salt.ext import six def __virtual__(): ''' Only load the module if lvm is installed ''' if salt.utils.path.which('lvm'): return 'lvm' return False def pv_present(name, **kwargs): ''' Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvcreate'](name, **kwargs) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Created Physical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Physical Volume {0}'.format(name) ret['result'] = False return ret def pv_absent(name): ''' Ensure that a Physical Device is not being used by lvm name The device name to initialize. 
''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} does not exist'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvremove'](name) if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name) ret['result'] = False else: ret['comment'] = 'Removed Physical Volume {0}'.format(name) ret['changes']['removed'] = changes return ret def vg_absent(name): ''' Remove an LVM volume group name The volume group to remove ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgremove'](name) if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Removed Volume Group {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Volume Group {0}'.format(name) ret['result'] = False return ret def lv_present(name, vgname=None, size=None, extents=None, snapshot=None, pv='', thinvolume=False, thinpool=False, force=False, **kwargs): ''' Create a new Logical Volume name The name of the Logical Volume vgname The name of the Volume Group on which the Logical Volume resides size The initial size of the Logical Volume extents The number of logical extents to allocate snapshot The name of the snapshot pv The Physical Volume to use kwargs Any supported options to lvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. .. versionadded:: to_complete thinvolume Logical Volume is thinly provisioned thinpool Logical Volume is a thin pool .. 
versionadded:: 2018.3.0 force Assume yes to all prompts ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} _snapshot = None if snapshot: _snapshot = name name = snapshot if thinvolume: lvpath = '/dev/{0}/{1}'.format(vgname.split('/')[0], name) else: lvpath = '/dev/{0}/{1}'.format(vgname, name) if __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvcreate'](name, vgname, size=size, extents=extents, snapshot=_snapshot, pv=pv, thinvolume=thinvolume, thinpool=thinpool, force=force, **kwargs) if __salt__['lvm.lvdisplay'](lvpath): ret['comment'] = 'Created Logical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Logical Volume {0}. Error: {1}'.format(name, changes) ret['result'] = False return ret def lv_absent(name, vgname=None): ''' Remove a given existing Logical Volume from a named existing volume group name The Logical Volume to remove vgname The name of the Volume Group on which the Logical Volume resides ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} lvpath = '/dev/{0}/{1}'.format(vgname, name) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvremove'](name, vgname) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Removed Logical Volume {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name) ret['result'] = False return ret
saltstack/salt
salt/states/lvm.py
lv_present
python
def lv_present(name, vgname=None, size=None, extents=None, snapshot=None, pv='', thinvolume=False, thinpool=False, force=False, **kwargs): ''' Create a new Logical Volume name The name of the Logical Volume vgname The name of the Volume Group on which the Logical Volume resides size The initial size of the Logical Volume extents The number of logical extents to allocate snapshot The name of the snapshot pv The Physical Volume to use kwargs Any supported options to lvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. .. versionadded:: to_complete thinvolume Logical Volume is thinly provisioned thinpool Logical Volume is a thin pool .. versionadded:: 2018.3.0 force Assume yes to all prompts ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} _snapshot = None if snapshot: _snapshot = name name = snapshot if thinvolume: lvpath = '/dev/{0}/{1}'.format(vgname.split('/')[0], name) else: lvpath = '/dev/{0}/{1}'.format(vgname, name) if __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvcreate'](name, vgname, size=size, extents=extents, snapshot=_snapshot, pv=pv, thinvolume=thinvolume, thinpool=thinpool, force=force, **kwargs) if __salt__['lvm.lvdisplay'](lvpath): ret['comment'] = 'Created Logical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Logical Volume {0}. Error: {1}'.format(name, changes) ret['result'] = False return ret
Create a new Logical Volume name The name of the Logical Volume vgname The name of the Volume Group on which the Logical Volume resides size The initial size of the Logical Volume extents The number of logical extents to allocate snapshot The name of the snapshot pv The Physical Volume to use kwargs Any supported options to lvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. .. versionadded:: to_complete thinvolume Logical Volume is thinly provisioned thinpool Logical Volume is a thin pool .. versionadded:: 2018.3.0 force Assume yes to all prompts
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lvm.py#L206-L295
null
# -*- coding: utf-8 -*- ''' Management of Linux logical volumes =================================== A state module to manage LVMs .. code-block:: yaml /dev/sda: lvm.pv_present my_vg: lvm.vg_present: - devices: /dev/sda lvroot: lvm.lv_present: - vgname: my_vg - size: 10G - stripes: 5 - stripesize: 8K ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import salt libs import salt.utils.path from salt.ext import six def __virtual__(): ''' Only load the module if lvm is installed ''' if salt.utils.path.which('lvm'): return 'lvm' return False def pv_present(name, **kwargs): ''' Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvcreate'](name, **kwargs) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Created Physical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Physical Volume {0}'.format(name) ret['result'] = False return ret def pv_absent(name): ''' Ensure that a Physical Device is not being used by lvm name The device name to initialize. 
''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} does not exist'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvremove'](name) if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name) ret['result'] = False else: ret['comment'] = 'Removed Physical Volume {0}'.format(name) ret['changes']['removed'] = changes return ret def vg_present(name, devices=None, **kwargs): ''' Create an LVM Volume Group name The Volume Group name to create devices A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if isinstance(devices, six.string_types): devices = devices.split(',') if __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already present'.format(name) for device in devices: realdev = os.path.realpath(device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs and pvs.get(realdev, None): if pvs[realdev]['Volume Group Name'] == name: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of Volume Group'.format(device)) elif pvs[realdev]['Volume Group Name'] in ['', '#orphans_lvm2']: __salt__['lvm.vgextend'](name, device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs[realdev]['Volume Group Name'] == name: ret['changes'].update( {device: 'added to {0}'.format(name)}) else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} could not be added'.format(device)) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of {1}'.format( device, pvs[realdev]['Volume Group Name'])) ret['result'] = False else: 
ret['comment'] = '{0}\n{1}'.format( ret['comment'], 'pv {0} is not present'.format(device)) ret['result'] = False elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgcreate'](name, devices, **kwargs) if __salt__['lvm.vgdisplay'](name): ret['comment'] = 'Created Volume Group {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Volume Group {0}'.format(name) ret['result'] = False return ret def vg_absent(name): ''' Remove an LVM volume group name The volume group to remove ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgremove'](name) if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Removed Volume Group {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Volume Group {0}'.format(name) ret['result'] = False return ret def lv_absent(name, vgname=None): ''' Remove a given existing Logical Volume from a named existing volume group name The Logical Volume to remove vgname The name of the Volume Group on which the Logical Volume resides ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} lvpath = '/dev/{0}/{1}'.format(vgname, name) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvremove'](name, vgname) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Removed Logical Volume {0}'.format(name) 
ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name) ret['result'] = False return ret
saltstack/salt
salt/states/lvm.py
lv_absent
python
def lv_absent(name, vgname=None): ''' Remove a given existing Logical Volume from a named existing volume group name The Logical Volume to remove vgname The name of the Volume Group on which the Logical Volume resides ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} lvpath = '/dev/{0}/{1}'.format(vgname, name) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvremove'](name, vgname) if not __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Removed Logical Volume {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Logical Volume {0}'.format(name) ret['result'] = False return ret
Remove a given existing Logical Volume from a named existing volume group name The Logical Volume to remove vgname The name of the Volume Group on which the Logical Volume resides
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lvm.py#L298-L329
null
# -*- coding: utf-8 -*- ''' Management of Linux logical volumes =================================== A state module to manage LVMs .. code-block:: yaml /dev/sda: lvm.pv_present my_vg: lvm.vg_present: - devices: /dev/sda lvroot: lvm.lv_present: - vgname: my_vg - size: 10G - stripes: 5 - stripesize: 8K ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import salt libs import salt.utils.path from salt.ext import six def __virtual__(): ''' Only load the module if lvm is installed ''' if salt.utils.path.which('lvm'): return 'lvm' return False def pv_present(name, **kwargs): ''' Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. kwargs Any supported options to pvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvcreate'](name, **kwargs) if __salt__['lvm.pvdisplay'](name): ret['comment'] = 'Created Physical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Physical Volume {0}'.format(name) ret['result'] = False return ret def pv_absent(name): ''' Ensure that a Physical Device is not being used by lvm name The device name to initialize. 
''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Physical Volume {0} does not exist'.format(name) elif __opts__['test']: ret['comment'] = 'Physical Volume {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.pvremove'](name) if __salt__['lvm.pvdisplay'](name, quiet=True): ret['comment'] = 'Failed to remove Physical Volume {0}'.format(name) ret['result'] = False else: ret['comment'] = 'Removed Physical Volume {0}'.format(name) ret['changes']['removed'] = changes return ret def vg_present(name, devices=None, **kwargs): ''' Create an LVM Volume Group name The Volume Group name to create devices A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if isinstance(devices, six.string_types): devices = devices.split(',') if __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already present'.format(name) for device in devices: realdev = os.path.realpath(device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs and pvs.get(realdev, None): if pvs[realdev]['Volume Group Name'] == name: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of Volume Group'.format(device)) elif pvs[realdev]['Volume Group Name'] in ['', '#orphans_lvm2']: __salt__['lvm.vgextend'](name, device) pvs = __salt__['lvm.pvdisplay'](realdev, real=True) if pvs[realdev]['Volume Group Name'] == name: ret['changes'].update( {device: 'added to {0}'.format(name)}) else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} could not be added'.format(device)) ret['result'] = False else: ret['comment'] = '{0}\n{1}'.format( ret['comment'], '{0} is part of {1}'.format( device, pvs[realdev]['Volume Group Name'])) ret['result'] = False else: 
ret['comment'] = '{0}\n{1}'.format( ret['comment'], 'pv {0} is not present'.format(device)) ret['result'] = False elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgcreate'](name, devices, **kwargs) if __salt__['lvm.vgdisplay'](name): ret['comment'] = 'Created Volume Group {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Volume Group {0}'.format(name) ret['result'] = False return ret def vg_absent(name): ''' Remove an LVM volume group name The volume group to remove ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Volume Group {0} already absent'.format(name) elif __opts__['test']: ret['comment'] = 'Volume Group {0} is set to be removed'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.vgremove'](name) if not __salt__['lvm.vgdisplay'](name, quiet=True): ret['comment'] = 'Removed Volume Group {0}'.format(name) ret['changes']['removed'] = changes else: ret['comment'] = 'Failed to remove Volume Group {0}'.format(name) ret['result'] = False return ret def lv_present(name, vgname=None, size=None, extents=None, snapshot=None, pv='', thinvolume=False, thinpool=False, force=False, **kwargs): ''' Create a new Logical Volume name The name of the Logical Volume vgname The name of the Volume Group on which the Logical Volume resides size The initial size of the Logical Volume extents The number of logical extents to allocate snapshot The name of the snapshot pv The Physical Volume to use kwargs Any supported options to lvcreate. See :mod:`linux_lvm <salt.modules.linux_lvm>` for more details. .. versionadded:: to_complete thinvolume Logical Volume is thinly provisioned thinpool Logical Volume is a thin pool .. 
versionadded:: 2018.3.0 force Assume yes to all prompts ''' ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} _snapshot = None if snapshot: _snapshot = name name = snapshot if thinvolume: lvpath = '/dev/{0}/{1}'.format(vgname.split('/')[0], name) else: lvpath = '/dev/{0}/{1}'.format(vgname, name) if __salt__['lvm.lvdisplay'](lvpath, quiet=True): ret['comment'] = 'Logical Volume {0} already present'.format(name) elif __opts__['test']: ret['comment'] = 'Logical Volume {0} is set to be created'.format(name) ret['result'] = None return ret else: changes = __salt__['lvm.lvcreate'](name, vgname, size=size, extents=extents, snapshot=_snapshot, pv=pv, thinvolume=thinvolume, thinpool=thinpool, force=force, **kwargs) if __salt__['lvm.lvdisplay'](lvpath): ret['comment'] = 'Created Logical Volume {0}'.format(name) ret['changes']['created'] = changes else: ret['comment'] = 'Failed to create Logical Volume {0}. Error: {1}'.format(name, changes) ret['result'] = False return ret
saltstack/salt
salt/sdb/consul.py
get_conn
python
def get_conn(profile): ''' Return a client object for accessing consul ''' params = {} for key in ('host', 'port', 'token', 'scheme', 'consistency', 'dc', 'verify'): if key in profile: params[key] = profile[key] if HAS_CONSUL: return consul.Consul(**params) else: raise CommandExecutionError( '(unable to import consul, ' 'module most likely not installed. PLease install python-consul)' )
Return a client object for accessing consul
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/consul.py#L66-L81
null
# -*- coding: utf-8 -*- ''' Consul sdb Module :maintainer: SaltStack :maturity: New :platform: all This module allows access to Consul using an ``sdb://`` URI Like all sdb modules, the Consul module requires a configuration profile to be configured in either the minion or master configuration file. This profile requires very little. For example: .. code-block:: yaml myconsul: driver: consul host: 127.0.0.1 port: 8500 token: b6376760-a8bb-edd5-fcda-33bc13bfc556 scheme: http consistency: default dc: dev verify: True The ``driver`` refers to the Consul module, all other options are optional. For option details see: https://python-consul.readthedocs.io/en/latest/#consul ''' from __future__ import absolute_import, print_function, unicode_literals from salt.exceptions import CommandExecutionError try: import consul HAS_CONSUL = True except ImportError: HAS_CONSUL = False __func_alias__ = { 'set_': 'set' } def set_(key, value, profile=None): if not profile: return False conn = get_conn(profile) return conn.kv.put(key, value) def get(key, profile=None): if not profile: return False conn = get_conn(profile) _, result = conn.kv.get(key) return result['Value'] if result else None
saltstack/salt
salt/returners/kafka_return.py
_delivery_report
python
def _delivery_report(err, msg): ''' Called once for each message produced to indicate delivery result. Triggered by poll() or flush(). ''' if err is not None: log.error('Message delivery failed: %s', err) else: log.debug('Message delivered to %s [%s]', msg.topic(), msg.partition())
Called once for each message produced to indicate delivery result. Triggered by poll() or flush().
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/kafka_return.py#L61-L67
null
# -*- coding: utf-8 -*- ''' Return data to a Kafka topic :maintainer: Justin Desilets (justin.desilets@gmail.com) :maturity: 20181119 :depends: confluent-kafka :platform: all To enable this returner install confluent-kafka and enable the following settings in the minion config: returner.kafka.bootstrap: - "server1:9092" - "server2:9092" - "server3:9092" returner.kafka.topic: 'topic' To use the kafka returner, append `--return kafka` to the Salt command, eg; salt '*' test.ping --return kafka ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.json # Import third-party libs try: from confluent_kafka import Producer HAS_KAFKA = True except ImportError: HAS_KAFKA = False log = logging.getLogger(__name__) __virtualname__ = 'kafka' def __virtual__(): if not HAS_KAFKA: return False, 'Could not import kafka returner; confluent-kafka is not installed.' return __virtualname__ def _get_conn(): ''' Return a kafka connection ''' if __salt__['config.option']('returner.kafka.bootstrap'): bootstrap = ','.join(__salt__['config.option']('returner.kafka.bootstrap')) else: log.error('Unable to find kafka returner config option: bootstrap') return None return bootstrap def returner(ret): ''' Return information to a Kafka server ''' if __salt__['config.option']('returner.kafka.topic'): topic = __salt__['config.option']('returner.kafka.topic') conn = _get_conn() producer = Producer({'bootstrap.servers': conn}) producer.poll(0) producer.produce(topic, salt.utils.json.dumps(ret), str(ret).encode('utf-8'), callback=_delivery_report) producer.flush() else: log.error('Unable to find kafka returner config option: topic')
saltstack/salt
salt/returners/kafka_return.py
returner
python
def returner(ret): ''' Return information to a Kafka server ''' if __salt__['config.option']('returner.kafka.topic'): topic = __salt__['config.option']('returner.kafka.topic') conn = _get_conn() producer = Producer({'bootstrap.servers': conn}) producer.poll(0) producer.produce(topic, salt.utils.json.dumps(ret), str(ret).encode('utf-8'), callback=_delivery_report) producer.flush() else: log.error('Unable to find kafka returner config option: topic')
Return information to a Kafka server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/kafka_return.py#L70-L84
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _get_conn():\n '''\n Return a kafka connection\n '''\n if __salt__['config.option']('returner.kafka.bootstrap'):\n bootstrap = ','.join(__salt__['config.option']('returner.kafka.bootstrap'))\n else:\n log.error('Unable to find kafka returner config option: bootstrap')\n return None\n return bootstrap\n" ]
# -*- coding: utf-8 -*- ''' Return data to a Kafka topic :maintainer: Justin Desilets (justin.desilets@gmail.com) :maturity: 20181119 :depends: confluent-kafka :platform: all To enable this returner install confluent-kafka and enable the following settings in the minion config: returner.kafka.bootstrap: - "server1:9092" - "server2:9092" - "server3:9092" returner.kafka.topic: 'topic' To use the kafka returner, append `--return kafka` to the Salt command, eg; salt '*' test.ping --return kafka ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.json # Import third-party libs try: from confluent_kafka import Producer HAS_KAFKA = True except ImportError: HAS_KAFKA = False log = logging.getLogger(__name__) __virtualname__ = 'kafka' def __virtual__(): if not HAS_KAFKA: return False, 'Could not import kafka returner; confluent-kafka is not installed.' return __virtualname__ def _get_conn(): ''' Return a kafka connection ''' if __salt__['config.option']('returner.kafka.bootstrap'): bootstrap = ','.join(__salt__['config.option']('returner.kafka.bootstrap')) else: log.error('Unable to find kafka returner config option: bootstrap') return None return bootstrap def _delivery_report(err, msg): ''' Called once for each message produced to indicate delivery result. Triggered by poll() or flush(). ''' if err is not None: log.error('Message delivery failed: %s', err) else: log.debug('Message delivered to %s [%s]', msg.topic(), msg.partition())
saltstack/salt
salt/states/composer.py
installed
python
def installed(name, composer=None, php=None, user=None, prefer_source=None, prefer_dist=None, no_scripts=None, no_plugins=None, optimize=None, no_dev=None, quiet=False, composer_home='/root', always_check=True, env=None): ''' Verify that the correct versions of composer dependencies are present. name Directory location of the ``composer.json`` file. composer Location of the ``composer.phar`` file. If not set composer will just execute ``composer`` as if it is installed globally. (i.e. ``/path/to/composer.phar``) php Location of the php executable to use with composer. (i.e. ``/usr/bin/php``) user Which system user to run composer as. .. versionadded:: 2014.1.4 prefer_source ``--prefer-source`` option of composer. prefer_dist ``--prefer-dist`` option of composer. no_scripts ``--no-scripts`` option of composer. no_plugins ``--no-plugins`` option of composer. optimize ``--optimize-autoloader`` option of composer. Recommended for production. no_dev ``--no-dev`` option for composer. Recommended for production. quiet ``--quiet`` option for composer. Whether or not to return output from composer. composer_home ``$COMPOSER_HOME`` environment variable always_check If ``True``, *always* run ``composer install`` in the directory. This is the default behavior. If ``False``, only run ``composer install`` if there is no vendor directory present. env A list of environment variables to be set prior to execution. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} did_install = __salt__['composer.did_composer_install'](name) # Check if composer.lock exists, if so we already ran `composer install` # and we don't need to do it again if always_check is False and did_install: ret['result'] = True ret['comment'] = 'Composer already installed this directory' return ret # The state of the system does need to be changed. Check if we're running # in ``test=true`` mode. 
if __opts__['test'] is True: if did_install is True: install_status = "" else: install_status = "not " ret['comment'] = 'The state of "{0}" will be changed.'.format(name) ret['changes'] = { 'old': 'composer install has {0}been run in {1}'.format(install_status, name), 'new': 'composer install will be run in {0}'.format(name) } ret['result'] = None return ret try: call = __salt__['composer.install']( name, composer=composer, php=php, runas=user, prefer_source=prefer_source, prefer_dist=prefer_dist, no_scripts=no_scripts, no_plugins=no_plugins, optimize=optimize, no_dev=no_dev, quiet=quiet, composer_home=composer_home, env=env ) except (SaltException) as err: ret['result'] = False ret['comment'] = 'Error executing composer in \'{0}\': {1}'.format(name, err) return ret # If composer retcode != 0 then an exception was thrown and we dealt with it. # Any other case is success, regardless of what composer decides to output. ret['result'] = True if quiet is True: ret['comment'] = 'Composer install completed successfully, output silenced by quiet flag' else: ret['comment'] = 'Composer install completed successfully' ret['changes'] = { 'stderr': call['stderr'], 'stdout': call['stdout'] } return ret
Verify that the correct versions of composer dependencies are present. name Directory location of the ``composer.json`` file. composer Location of the ``composer.phar`` file. If not set composer will just execute ``composer`` as if it is installed globally. (i.e. ``/path/to/composer.phar``) php Location of the php executable to use with composer. (i.e. ``/usr/bin/php``) user Which system user to run composer as. .. versionadded:: 2014.1.4 prefer_source ``--prefer-source`` option of composer. prefer_dist ``--prefer-dist`` option of composer. no_scripts ``--no-scripts`` option of composer. no_plugins ``--no-plugins`` option of composer. optimize ``--optimize-autoloader`` option of composer. Recommended for production. no_dev ``--no-dev`` option for composer. Recommended for production. quiet ``--quiet`` option for composer. Whether or not to return output from composer. composer_home ``$COMPOSER_HOME`` environment variable always_check If ``True``, *always* run ``composer install`` in the directory. This is the default behavior. If ``False``, only run ``composer install`` if there is no vendor directory present. env A list of environment variables to be set prior to execution.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/composer.py#L53-L182
null
# -*- coding: utf-8 -*- ''' Installation of Composer Packages ================================= These states manage the installed packages for composer for PHP. Note that either composer is installed and accessible via a bin directory or you can pass the location of composer in the state. .. code-block:: yaml get-composer: cmd.run: - name: 'CURL=`which curl`; $CURL -sS https://getcomposer.org/installer | php' - unless: test -f /usr/local/bin/composer - cwd: /root/ install-composer: cmd.wait: - name: mv /root/composer.phar /usr/local/bin/composer - cwd: /root/ - watch: - cmd: get-composer /path/to/project: composer.installed: - no_dev: true - require: - cmd: install-composer # Without composer installed in your PATH # Note: composer.phar must be executable for state to work properly /path/to/project: composer.installed: - composer: /path/to/composer.phar - php: /usr/local/bin/php - no_dev: true ''' from __future__ import absolute_import, print_function, unicode_literals # Import salt libs from salt.exceptions import SaltException def __virtual__(): ''' Only load if the composer module is available in __salt__ ''' return 'composer.install' in __salt__ def update(name, composer=None, php=None, user=None, prefer_source=None, prefer_dist=None, no_scripts=None, no_plugins=None, optimize=None, no_dev=None, quiet=False, composer_home='/root', env=None): ''' Composer update the directory to ensure we have the latest versions of all project dependencies. name Directory location of the ``composer.json`` file. composer Location of the ``composer.phar`` file. If not set composer will just execute ``composer`` as if it is installed globally. (i.e. /path/to/composer.phar) php Location of the php executable to use with composer. (i.e. ``/usr/bin/php``) user Which system user to run composer as. .. versionadded:: 2014.1.4 prefer_source ``--prefer-source`` option of composer. prefer_dist ``--prefer-dist`` option of composer. no_scripts ``--no-scripts`` option of composer. 
no_plugins ``--no-plugins`` option of composer. optimize ``--optimize-autoloader`` option of composer. Recommended for production. no_dev ``--no-dev`` option for composer. Recommended for production. quiet ``--quiet`` option for composer. Whether or not to return output from composer. composer_home ``$COMPOSER_HOME`` environment variable env A list of environment variables to be set prior to execution. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} # Check if composer.lock exists, if so we already ran `composer install` is_installed = __salt__['composer.did_composer_install'](name) if is_installed: old_status = "composer install has not yet been run in {0}".format(name) else: old_status = "composer install has been run in {0}".format(name) # The state of the system does need to be changed. Check if we're running # in ``test=true`` mode. if __opts__['test'] is True: ret['comment'] = 'The state of "{0}" will be changed.'.format(name) ret['changes'] = { 'old': old_status, 'new': 'composer install/update will be run in {0}'.format(name) } ret['result'] = None return ret try: call = __salt__['composer.update']( name, composer=composer, php=php, runas=user, prefer_source=prefer_source, prefer_dist=prefer_dist, no_scripts=no_scripts, no_plugins=no_plugins, optimize=optimize, no_dev=no_dev, quiet=quiet, composer_home=composer_home, env=env ) except (SaltException) as err: ret['result'] = False ret['comment'] = 'Error executing composer in \'{0}\': {1}'.format(name, err) return ret # If composer retcode != 0 then an exception was thrown and we dealt with it. # Any other case is success, regardless of what composer decides to output. ret['result'] = True if quiet is True: ret['comment'] = 'Composer update completed successfully, output silenced by quiet flag' else: ret['comment'] = 'Composer update completed successfully' ret['changes'] = { 'stderr': call['stderr'], 'stdout': call['stdout'] } return ret
saltstack/salt
salt/states/composer.py
update
python
def update(name, composer=None, php=None, user=None, prefer_source=None, prefer_dist=None, no_scripts=None, no_plugins=None, optimize=None, no_dev=None, quiet=False, composer_home='/root', env=None): ''' Composer update the directory to ensure we have the latest versions of all project dependencies. name Directory location of the ``composer.json`` file. composer Location of the ``composer.phar`` file. If not set composer will just execute ``composer`` as if it is installed globally. (i.e. /path/to/composer.phar) php Location of the php executable to use with composer. (i.e. ``/usr/bin/php``) user Which system user to run composer as. .. versionadded:: 2014.1.4 prefer_source ``--prefer-source`` option of composer. prefer_dist ``--prefer-dist`` option of composer. no_scripts ``--no-scripts`` option of composer. no_plugins ``--no-plugins`` option of composer. optimize ``--optimize-autoloader`` option of composer. Recommended for production. no_dev ``--no-dev`` option for composer. Recommended for production. quiet ``--quiet`` option for composer. Whether or not to return output from composer. composer_home ``$COMPOSER_HOME`` environment variable env A list of environment variables to be set prior to execution. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} # Check if composer.lock exists, if so we already ran `composer install` is_installed = __salt__['composer.did_composer_install'](name) if is_installed: old_status = "composer install has not yet been run in {0}".format(name) else: old_status = "composer install has been run in {0}".format(name) # The state of the system does need to be changed. Check if we're running # in ``test=true`` mode. 
if __opts__['test'] is True: ret['comment'] = 'The state of "{0}" will be changed.'.format(name) ret['changes'] = { 'old': old_status, 'new': 'composer install/update will be run in {0}'.format(name) } ret['result'] = None return ret try: call = __salt__['composer.update']( name, composer=composer, php=php, runas=user, prefer_source=prefer_source, prefer_dist=prefer_dist, no_scripts=no_scripts, no_plugins=no_plugins, optimize=optimize, no_dev=no_dev, quiet=quiet, composer_home=composer_home, env=env ) except (SaltException) as err: ret['result'] = False ret['comment'] = 'Error executing composer in \'{0}\': {1}'.format(name, err) return ret # If composer retcode != 0 then an exception was thrown and we dealt with it. # Any other case is success, regardless of what composer decides to output. ret['result'] = True if quiet is True: ret['comment'] = 'Composer update completed successfully, output silenced by quiet flag' else: ret['comment'] = 'Composer update completed successfully' ret['changes'] = { 'stderr': call['stderr'], 'stdout': call['stdout'] } return ret
Composer update the directory to ensure we have the latest versions of all project dependencies. name Directory location of the ``composer.json`` file. composer Location of the ``composer.phar`` file. If not set composer will just execute ``composer`` as if it is installed globally. (i.e. /path/to/composer.phar) php Location of the php executable to use with composer. (i.e. ``/usr/bin/php``) user Which system user to run composer as. .. versionadded:: 2014.1.4 prefer_source ``--prefer-source`` option of composer. prefer_dist ``--prefer-dist`` option of composer. no_scripts ``--no-scripts`` option of composer. no_plugins ``--no-plugins`` option of composer. optimize ``--optimize-autoloader`` option of composer. Recommended for production. no_dev ``--no-dev`` option for composer. Recommended for production. quiet ``--quiet`` option for composer. Whether or not to return output from composer. composer_home ``$COMPOSER_HOME`` environment variable env A list of environment variables to be set prior to execution.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/composer.py#L185-L301
null
# -*- coding: utf-8 -*- ''' Installation of Composer Packages ================================= These states manage the installed packages for composer for PHP. Note that either composer is installed and accessible via a bin directory or you can pass the location of composer in the state. .. code-block:: yaml get-composer: cmd.run: - name: 'CURL=`which curl`; $CURL -sS https://getcomposer.org/installer | php' - unless: test -f /usr/local/bin/composer - cwd: /root/ install-composer: cmd.wait: - name: mv /root/composer.phar /usr/local/bin/composer - cwd: /root/ - watch: - cmd: get-composer /path/to/project: composer.installed: - no_dev: true - require: - cmd: install-composer # Without composer installed in your PATH # Note: composer.phar must be executable for state to work properly /path/to/project: composer.installed: - composer: /path/to/composer.phar - php: /usr/local/bin/php - no_dev: true ''' from __future__ import absolute_import, print_function, unicode_literals # Import salt libs from salt.exceptions import SaltException def __virtual__(): ''' Only load if the composer module is available in __salt__ ''' return 'composer.install' in __salt__ def installed(name, composer=None, php=None, user=None, prefer_source=None, prefer_dist=None, no_scripts=None, no_plugins=None, optimize=None, no_dev=None, quiet=False, composer_home='/root', always_check=True, env=None): ''' Verify that the correct versions of composer dependencies are present. name Directory location of the ``composer.json`` file. composer Location of the ``composer.phar`` file. If not set composer will just execute ``composer`` as if it is installed globally. (i.e. ``/path/to/composer.phar``) php Location of the php executable to use with composer. (i.e. ``/usr/bin/php``) user Which system user to run composer as. .. versionadded:: 2014.1.4 prefer_source ``--prefer-source`` option of composer. prefer_dist ``--prefer-dist`` option of composer. no_scripts ``--no-scripts`` option of composer. 
no_plugins ``--no-plugins`` option of composer. optimize ``--optimize-autoloader`` option of composer. Recommended for production. no_dev ``--no-dev`` option for composer. Recommended for production. quiet ``--quiet`` option for composer. Whether or not to return output from composer. composer_home ``$COMPOSER_HOME`` environment variable always_check If ``True``, *always* run ``composer install`` in the directory. This is the default behavior. If ``False``, only run ``composer install`` if there is no vendor directory present. env A list of environment variables to be set prior to execution. ''' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} did_install = __salt__['composer.did_composer_install'](name) # Check if composer.lock exists, if so we already ran `composer install` # and we don't need to do it again if always_check is False and did_install: ret['result'] = True ret['comment'] = 'Composer already installed this directory' return ret # The state of the system does need to be changed. Check if we're running # in ``test=true`` mode. if __opts__['test'] is True: if did_install is True: install_status = "" else: install_status = "not " ret['comment'] = 'The state of "{0}" will be changed.'.format(name) ret['changes'] = { 'old': 'composer install has {0}been run in {1}'.format(install_status, name), 'new': 'composer install will be run in {0}'.format(name) } ret['result'] = None return ret try: call = __salt__['composer.install']( name, composer=composer, php=php, runas=user, prefer_source=prefer_source, prefer_dist=prefer_dist, no_scripts=no_scripts, no_plugins=no_plugins, optimize=optimize, no_dev=no_dev, quiet=quiet, composer_home=composer_home, env=env ) except (SaltException) as err: ret['result'] = False ret['comment'] = 'Error executing composer in \'{0}\': {1}'.format(name, err) return ret # If composer retcode != 0 then an exception was thrown and we dealt with it. 
# Any other case is success, regardless of what composer decides to output. ret['result'] = True if quiet is True: ret['comment'] = 'Composer install completed successfully, output silenced by quiet flag' else: ret['comment'] = 'Composer install completed successfully' ret['changes'] = { 'stderr': call['stderr'], 'stdout': call['stdout'] } return ret
saltstack/salt
salt/modules/boto_elasticache.py
exists
python
def exists(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a cache cluster exists.

    CLI example::

        salt myminion boto_elasticache.exists myelasticache
    '''
    # Connection parameters fall back to pillar/minion config when not given.
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # A describe call on a missing cluster raises; success implies existence.
        conn.describe_cache_clusters(name)
    except boto.exception.BotoServerError as exc:
        log.debug(exc)
        return False
    return True
Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L87-L102
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
group_exists
python
def group_exists(name, region=None, key=None, keyid=None, profile=None):
    '''
    Check to see if a replication group exists.

    CLI example::

        salt myminion boto_elasticache.group_exists myelasticache
    '''
    # Connection parameters fall back to pillar/minion config when not given.
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # A describe call on a missing group raises; success implies existence.
        conn.describe_replication_groups(name)
    except boto.exception.BotoServerError as exc:
        log.debug(exc)
        return False
    return True
Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L105-L120
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
create_replication_group
python
def create_replication_group(name, primary_cluster_id, replication_group_description,
                             wait=None, region=None, key=None, keyid=None,
                             profile=None):
    '''
    Create an ElastiCache replication group.

    name
        ID of the replication group to create.
    primary_cluster_id
        ID of the existing cache cluster that will serve as the group primary.
    replication_group_description
        Human-readable description of the replication group.
    wait
        If truthy, poll every 3 seconds until the group reports the
        ``available`` status before returning.
    region, key, keyid, profile
        Standard boto connection parameters.

    Returns ``True`` on success, ``None`` if no connection could be obtained,
    and ``{}`` (falsy) on an API error.

    CLI example::

        salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if not conn:
        return None
    try:
        conn.create_replication_group(name, primary_cluster_id,
                                      replication_group_description)
        if not wait:
            log.info('Created replication group %s.', name)
            return True
        while True:
            time.sleep(3)
            # An empty/missing config also counts as done so a deleted or
            # unreadable group does not spin forever.
            config = describe_replication_group(name, region, key, keyid,
                                                profile)
            if not config:
                return True
            if config['status'] == 'available':
                log.info('Created replication group %s.', name)
                return True
    except boto.exception.BotoServerError as e:
        msg = 'Failed to create replication group {0}.'.format(name)
        log.error(msg)
        log.debug(e)
        # NOTE: sibling create() returns False here; {} is kept for
        # backward compatibility with existing callers of this function.
        return {}
Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L123-L154
[ "def describe_replication_group(name, region=None, key=None, keyid=None,\n profile=None, parameter=None):\n '''\n Get replication group information.\n\n CLI example::\n\n salt myminion boto_elasticache.describe_replication_group mygroup\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n if not conn:\n return None\n try:\n cc = conn.describe_replication_groups(name)\n except boto.exception.BotoServerError as e:\n msg = 'Failed to get config for cache cluster {0}.'.format(name)\n log.error(msg)\n log.debug(e)\n return {}\n ret = odict.OrderedDict()\n cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']\n cc = cc['ReplicationGroups'][0]\n\n attrs = ['status', 'description', 'primary_endpoint',\n 'member_clusters', 'replication_group_id',\n 'pending_modified_values', 'primary_cluster_id',\n 'node_groups']\n for key, val in six.iteritems(cc):\n _key = boto.utils.pythonize_name(key)\n if _key == 'status':\n if val:\n ret[_key] = val\n else:\n ret[_key] = None\n if _key == 'description':\n if val:\n ret[_key] = val\n else:\n ret[_key] = None\n if _key == 'replication_group_id':\n if val:\n ret[_key] = val\n else:\n ret[_key] = None\n if _key == 'member_clusters':\n if val:\n ret[_key] = val\n else:\n ret[_key] = None\n if _key == 'node_groups':\n if val:\n ret[_key] = val\n else:\n ret[_key] = None\n if _key == 'pending_modified_values':\n if val:\n ret[_key] = val\n else:\n ret[_key] = None\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. 
CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
describe_replication_group
python
def describe_replication_group(name, region=None, key=None, keyid=None,
                               profile=None, parameter=None):
    '''
    Get replication group information.

    name
        ID of the replication group to describe.
    region, key, keyid, profile
        Standard boto connection parameters.
    parameter
        Unused; retained for interface compatibility.

    Returns an ``OrderedDict`` of selected, pythonized attributes (falsy
    values are normalized to ``None``), ``None`` if no connection could be
    obtained, and ``{}`` on an API error.

    CLI example::

        salt myminion boto_elasticache.describe_replication_group mygroup
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if not conn:
        return None
    try:
        cc = conn.describe_replication_groups(name)
    except boto.exception.BotoServerError as e:
        msg = 'Failed to get config for cache cluster {0}.'.format(name)
        log.error(msg)
        log.debug(e)
        return {}
    ret = odict.OrderedDict()
    cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']
    cc = cc['ReplicationGroups'][0]

    # Whitelist of attributes to expose. The previous hand-written if-chain
    # defined this list but never used it, silently dropping
    # 'primary_endpoint' and 'primary_cluster_id'; driving the loop off the
    # list fixes that omission.
    attrs = ('status', 'description', 'primary_endpoint',
             'member_clusters', 'replication_group_id',
             'pending_modified_values', 'primary_cluster_id',
             'node_groups')
    # 'raw_key' avoids shadowing the 'key' (AWS secret key) parameter, which
    # the original loop variable clobbered.
    for raw_key, val in six.iteritems(cc):
        _key = boto.utils.pythonize_name(raw_key)
        if _key in attrs:
            ret[_key] = val if val else None
    return ret
Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L181-L241
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if 
val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = 
conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return 
False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) 
else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. 
CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. 
CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
get_config
python
def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret
Get the configuration for a cache cluster. CLI example:: salt myminion boto_elasticache.get_config myelasticache
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L244-L307
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion 
boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def 
list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = 
conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
get_node_host
python
def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host
Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L310-L333
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = 
conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. 
CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet 
group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
get_group_host
python
def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host
Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L336-L359
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. 
CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet 
group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
get_all_cache_subnet_groups
python
def get_all_cache_subnet_groups(name=None, region=None, key=None,
                                keyid=None, profile=None):
    '''
    Return a list of all cache subnet groups with details

    name
        Optional subnet group name to filter on; when None, all groups
        in the region are returned.

    Returns a list of raw subnet-group dicts as provided by the API,
    or an empty list on API error.

    CLI example::

        salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        # The API paginates; 'Marker' is the continuation token. An empty
        # string requests the first page, and an absent 'Marker' in the
        # response (-> None) terminates the loop.
        marker = ''
        groups = []
        while marker is not None:
            ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name,
                                                    marker=marker)
            # Unwrap the two nested response envelopes defensively with .get()
            # so a malformed response yields an empty page, not a KeyError.
            trimmed = ret.get('DescribeCacheSubnetGroupsResponse',
                              {}).get('DescribeCacheSubnetGroupsResult', {})
            groups += trimmed.get('CacheSubnetGroups', [])
            marker = trimmed.get('Marker', None)
        if not groups:
            log.debug('No ElastiCache subnet groups found.')
        return groups
    except boto.exception.BotoServerError as e:
        # Best-effort listing: log the service error and report no groups.
        log.error(e)
        return []
Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L362-L387
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. 
CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet 
group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
list_cache_subnet_groups
python
def list_cache_subnet_groups(name=None, region=None, key=None,
                             keyid=None, profile=None):
    '''
    Return a list of all cache subnet group names

    Thin wrapper over get_all_cache_subnet_groups that projects out just
    the 'CacheSubnetGroupName' of each group; inherits that function's
    behavior of returning an empty list on API error.

    CLI example::

        salt myminion boto_elasticache.list_subnet_groups region=us-east-1
    '''
    return [g['CacheSubnetGroupName'] for g in
            get_all_cache_subnet_groups(name, region, key, keyid, profile)]
Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L390-L400
[ "def get_all_cache_subnet_groups(name=None, region=None, key=None,\n keyid=None, profile=None):\n '''\n Return a list of all cache subnet groups with details\n\n CLI example::\n\n salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n try:\n marker = ''\n groups = []\n while marker is not None:\n ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name,\n marker=marker)\n trimmed = ret.get('DescribeCacheSubnetGroupsResponse',\n {}).get('DescribeCacheSubnetGroupsResult', {})\n groups += trimmed.get('CacheSubnetGroups', [])\n marker = trimmed.get('Marker', None)\n if not groups:\n log.debug('No ElastiCache subnet groups found.')\n return groups\n except boto.exception.BotoServerError as e:\n log.error(e)\n return []\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. 
CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet 
group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
subnet_group_exists
python
def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False
Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L403-L424
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. 
CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
create_subnet_group
python
def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False
Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L427-L468
[ "def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None):\n '''\n Check to see if an ElastiCache subnet group exists.\n\n CLI example::\n\n salt myminion boto_elasticache.subnet_group_exists my-param-group \\\n region=us-east-1\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n if not conn:\n return False\n try:\n ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name)\n if not ec:\n msg = ('ElastiCache subnet group does not exist in region {0}'.format(region))\n log.debug(msg)\n return False\n return True\n except boto.exception.BotoServerError as e:\n log.debug(e)\n return False\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val 
return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. 
CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. 
CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
get_cache_subnet_group
python
def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret
Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L471-L513
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. 
CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. 
CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
delete_subnet_group
python
def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False
Delete an ElastiCache subnet group. CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L516-L537
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. 
CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. 
CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
create
python
def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False
Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L540-L581
[ "def get_config(name, region=None, key=None, keyid=None, profile=None):\n '''\n Get the configuration for a cache cluster.\n\n CLI example::\n\n salt myminion boto_elasticache.get_config myelasticache\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n if not conn:\n return None\n try:\n cc = conn.describe_cache_clusters(name,\n show_cache_node_info=True)\n except boto.exception.BotoServerError as e:\n msg = 'Failed to get config for cache cluster {0}.'.format(name)\n log.error(msg)\n log.debug(e)\n return {}\n cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult']\n cc = cc['CacheClusters'][0]\n ret = odict.OrderedDict()\n attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id',\n 'cache_security_groups', 'replication_group_id',\n 'auto_minor_version_upgrade', 'num_cache_nodes',\n 'preferred_availability_zone', 'security_groups',\n 'cache_subnet_group_name', 'engine_version', 'cache_node_type',\n 'notification_configuration', 'preferred_maintenance_window',\n 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes']\n for key, val in six.iteritems(cc):\n _key = boto.utils.pythonize_name(key)\n if _key not in attrs:\n continue\n if _key == 'cache_parameter_group':\n if val:\n ret[_key] = val['CacheParameterGroupName']\n else:\n ret[_key] = None\n elif _key == 'cache_nodes':\n if val:\n ret[_key] = [k for k in val]\n else:\n ret[_key] = []\n elif _key == 'cache_security_groups':\n if val:\n ret[_key] = [k['CacheSecurityGroupName'] for k in val]\n else:\n ret[_key] = []\n elif _key == 'configuration_endpoint':\n if val:\n ret['port'] = val['Port']\n ret['address'] = val['Address']\n else:\n ret['port'] = None\n ret['address'] = None\n elif _key == 'notification_configuration':\n if val:\n ret['notification_topic_arn'] = val['TopicArn']\n else:\n ret['notification_topic_arn'] = None\n else:\n ret[_key] = val\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def delete(name, wait=False, region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. 
CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
delete
python
def delete(name, wait=False, region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False
Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L584-L612
[ "def get_config(name, region=None, key=None, keyid=None, profile=None):\n '''\n Get the configuration for a cache cluster.\n\n CLI example::\n\n salt myminion boto_elasticache.get_config myelasticache\n '''\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n if not conn:\n return None\n try:\n cc = conn.describe_cache_clusters(name,\n show_cache_node_info=True)\n except boto.exception.BotoServerError as e:\n msg = 'Failed to get config for cache cluster {0}.'.format(name)\n log.error(msg)\n log.debug(e)\n return {}\n cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult']\n cc = cc['CacheClusters'][0]\n ret = odict.OrderedDict()\n attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id',\n 'cache_security_groups', 'replication_group_id',\n 'auto_minor_version_upgrade', 'num_cache_nodes',\n 'preferred_availability_zone', 'security_groups',\n 'cache_subnet_group_name', 'engine_version', 'cache_node_type',\n 'notification_configuration', 'preferred_maintenance_window',\n 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes']\n for key, val in six.iteritems(cc):\n _key = boto.utils.pythonize_name(key)\n if _key not in attrs:\n continue\n if _key == 'cache_parameter_group':\n if val:\n ret[_key] = val['CacheParameterGroupName']\n else:\n ret[_key] = None\n elif _key == 'cache_nodes':\n if val:\n ret[_key] = [k for k in val]\n else:\n ret[_key] = []\n elif _key == 'cache_security_groups':\n if val:\n ret[_key] = [k['CacheSecurityGroupName'] for k in val]\n else:\n ret[_key] = []\n elif _key == 'configuration_endpoint':\n if val:\n ret['port'] = val['Port']\n ret['address'] = val['Address']\n else:\n ret['port'] = None\n ret['address'] = None\n elif _key == 'notification_configuration':\n if val:\n ret['notification_topic_arn'] = val['TopicArn']\n else:\n ret['notification_topic_arn'] = None\n else:\n ret[_key] = val\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def 
create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
create_cache_security_group
python
def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False
Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L615-L633
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
delete_cache_security_group
python
def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False
Delete a cache security group. CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L636-L654
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' 
msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/modules/boto_elasticache.py
authorize_cache_security_group_ingress
python
def authorize_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: added = conn.authorize_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if added: msg = 'Added {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to add {0} to cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
Authorize network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.authorize_cache_security_group_ingress myelasticachesg myec2sg 879879
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elasticache.py#L657-L689
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon Elasticache .. versionadded:: 2014.7.0 :configuration: This module accepts explicit elasticache credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml elasticache.keyid: GKTADJGHEIQSXMKKRBJ08H elasticache.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A region may also be specified in the configuration: .. code-block:: yaml elasticache.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto ''' # keep lint from choking on _get_conn and _cache_id #pylint: disable=E0602 from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging import time # Import Salt libs from salt.ext import six from salt.exceptions import SaltInvocationError import salt.utils.odict as odict import salt.utils.versions log = logging.getLogger(__name__) # Import third party libs try: # pylint: disable=unused-import import boto import boto.elasticache # pylint: enable=unused-import import boto.utils logging.getLogger('boto').setLevel(logging.CRITICAL) HAS_BOTO = True except ImportError: HAS_BOTO = False def __virtual__(): ''' Only load if boto libraries exist. 
''' has_boto_reqs = salt.utils.versions.check_boto_reqs( check_boto3=False ) if has_boto_reqs is True: __utils__['boto.assign_funcs'](__name__, 'elasticache', pack=__salt__) return has_boto_reqs def exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a cache cluster exists. CLI example:: salt myminion boto_elasticache.exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_cache_clusters(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if a replication group exists. CLI example:: salt myminion boto_elasticache.group_exists myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.describe_replication_groups(name) return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_replication_group(name, primary_cluster_id, replication_group_description, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create replication group. CLI example:: salt myminion boto_elasticache.create_replication_group myelasticache myprimarycluster description ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.create_replication_group(name, primary_cluster_id, replication_group_description) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = describe_replication_group(name, region, key, keyid, profile) if not config: return True if config['status'] == 'available': return True except boto.exception.BotoServerError as e: msg = 'Failed to create replication group {0}.'.format(name) log.error(msg) log.debug(e) return {} def delete_replication_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache replication group. 
CLI example:: salt myminion boto_elasticache.delete_replication_group my-replication-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_replication_group(name) msg = 'Deleted ElastiCache replication group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache replication group {0}'.format(name) log.error(msg) return False def describe_replication_group(name, region=None, key=None, keyid=None, profile=None, parameter=None): ''' Get replication group information. CLI example:: salt myminion boto_elasticache.describe_replication_group mygroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} ret = odict.OrderedDict() cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0] attrs = ['status', 'description', 'primary_endpoint', 'member_clusters', 'replication_group_id', 'pending_modified_values', 'primary_cluster_id', 'node_groups'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key == 'status': if val: ret[_key] = val else: ret[_key] = None if _key == 'description': if val: ret[_key] = val else: ret[_key] = None if _key == 'replication_group_id': if val: ret[_key] = val else: ret[_key] = None if _key == 'member_clusters': if val: ret[_key] = val else: ret[_key] = None if _key == 'node_groups': if val: ret[_key] = val else: ret[_key] = None if _key == 'pending_modified_values': if val: ret[_key] = val else: ret[_key] = None return ret def get_config(name, region=None, key=None, keyid=None, profile=None): ''' Get the configuration for a cache cluster. 
CLI example:: salt myminion boto_elasticache.get_config myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] cc = cc['CacheClusters'][0] ret = odict.OrderedDict() attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id', 'cache_security_groups', 'replication_group_id', 'auto_minor_version_upgrade', 'num_cache_nodes', 'preferred_availability_zone', 'security_groups', 'cache_subnet_group_name', 'engine_version', 'cache_node_type', 'notification_configuration', 'preferred_maintenance_window', 'configuration_endpoint', 'cache_cluster_status', 'cache_nodes'] for key, val in six.iteritems(cc): _key = boto.utils.pythonize_name(key) if _key not in attrs: continue if _key == 'cache_parameter_group': if val: ret[_key] = val['CacheParameterGroupName'] else: ret[_key] = None elif _key == 'cache_nodes': if val: ret[_key] = [k for k in val] else: ret[_key] = [] elif _key == 'cache_security_groups': if val: ret[_key] = [k['CacheSecurityGroupName'] for k in val] else: ret[_key] = [] elif _key == 'configuration_endpoint': if val: ret['port'] = val['Port'] ret['address'] = val['Address'] else: ret['port'] = None ret['address'] = None elif _key == 'notification_configuration': if val: ret['notification_topic_arn'] = val['TopicArn'] else: ret['notification_topic_arn'] = None else: ret[_key] = val return ret def get_node_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from cache node CLI example:: salt myminion boto_elasticache.get_node_host myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_cache_clusters(name, 
show_cache_node_info=True) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult'] host = cc['CacheClusters'][0]['CacheNodes'][0]['Endpoint']['Address'] return host def get_group_host(name, region=None, key=None, keyid=None, profile=None): ''' Get hostname from replication cache group CLI example:: salt myminion boto_elasticache.get_group_host myelasticachegroup ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return None try: cc = conn.describe_replication_groups(name) except boto.exception.BotoServerError as e: msg = 'Failed to get config for cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return {} cc = cc['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult'] cc = cc['ReplicationGroups'][0]['NodeGroups'][0]['PrimaryEndpoint'] host = cc['Address'] return host def get_all_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet groups with details CLI example:: salt myminion boto_elasticache.get_all_subnet_groups region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: marker = '' groups = [] while marker is not None: ret = conn.describe_cache_subnet_groups(cache_subnet_group_name=name, marker=marker) trimmed = ret.get('DescribeCacheSubnetGroupsResponse', {}).get('DescribeCacheSubnetGroupsResult', {}) groups += trimmed.get('CacheSubnetGroups', []) marker = trimmed.get('Marker', None) if not groups: log.debug('No ElastiCache subnet groups found.') return groups except boto.exception.BotoServerError as e: log.error(e) return [] def list_cache_subnet_groups(name=None, region=None, key=None, keyid=None, profile=None): ''' Return a list of all cache subnet group names CLI example:: salt myminion boto_elasticache.list_subnet_groups 
region=us-east-1 ''' return [g['CacheSubnetGroupName'] for g in get_all_cache_subnet_groups(name, region, key, keyid, profile)] def subnet_group_exists(name, tags=None, region=None, key=None, keyid=None, profile=None): ''' Check to see if an ElastiCache subnet group exists. CLI example:: salt myminion boto_elasticache.subnet_group_exists my-param-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: ec = conn.describe_cache_subnet_groups(cache_subnet_group_name=name) if not ec: msg = ('ElastiCache subnet group does not exist in region {0}'.format(region)) log.debug(msg) return False return True except boto.exception.BotoServerError as e: log.debug(e) return False def create_subnet_group(name, description, subnet_ids=None, subnet_names=None, tags=None, region=None, key=None, keyid=None, profile=None): ''' Create an ElastiCache subnet group CLI example to create an ElastiCache subnet group:: salt myminion boto_elasticache.create_subnet_group my-subnet-group \ "group description" subnet_ids='[subnet-12345678, subnet-87654321]' \ region=us-east-1 ''' if not _exactly_one((subnet_ids, subnet_names)): raise SaltInvocationError("Exactly one of either 'subnet_ids' or " "'subnet_names' must be provided.") conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False if subnet_group_exists(name, tags, region, key, keyid, profile): return True if subnet_names: subnet_ids = [] for n in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', n, region=region, key=key, keyid=keyid, profile=profile) if 'id' not in r: log.error('Couldn\'t resolve subnet name %s to an ID.', subnet_name) return False subnet_ids += [r['id']] try: ec = conn.create_cache_subnet_group(name, description, subnet_ids) if not ec: msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False log.info('Created ElastiCache subnet group %s', name) return True 
except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to create ElastiCache subnet group {0}'.format(name) log.error(msg) return False def get_cache_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Get information about a cache subnet group. CLI example:: salt myminion boto_elasticache.get_cache_subnet_group mycache_subnet_group ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: csg = conn.describe_cache_subnet_groups(name) csg = csg['DescribeCacheSubnetGroupsResponse'] csg = csg['DescribeCacheSubnetGroupsResult']['CacheSubnetGroups'][0] except boto.exception.BotoServerError as e: msg = 'Failed to get cache subnet group {0}.'.format(name) log.error(msg) log.debug(e) return False except (IndexError, TypeError, KeyError): msg = 'Failed to get cache subnet group {0} (2).'.format(name) log.error(msg) return False ret = {} for key, val in six.iteritems(csg): if key == 'CacheSubnetGroupName': ret['cache_subnet_group_name'] = val elif key == 'CacheSubnetGroupDescription': ret['cache_subnet_group_description'] = val elif key == 'VpcId': ret['vpc_id'] = val elif key == 'Subnets': ret['subnets'] = [] for subnet in val: _subnet = {} _subnet['subnet_id'] = subnet['SubnetIdentifier'] _az = subnet['SubnetAvailabilityZone']['Name'] _subnet['subnet_availability_zone'] = _az ret['subnets'].append(_subnet) else: ret[key] = val return ret def delete_subnet_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete an ElastiCache subnet group. 
CLI example:: salt myminion boto_elasticache.delete_subnet_group my-subnet-group \ region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if not conn: return False try: conn.delete_cache_subnet_group(name) msg = 'Deleted ElastiCache subnet group {0}.'.format(name) log.info(msg) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete ElastiCache subnet group {0}'.format(name) log.error(msg) return False def create(name, num_cache_nodes=None, engine=None, cache_node_type=None, replication_group_id=None, engine_version=None, cache_parameter_group_name=None, cache_subnet_group_name=None, cache_security_group_names=None, security_group_ids=None, snapshot_arns=None, preferred_availability_zone=None, preferred_maintenance_window=None, port=None, notification_topic_arn=None, auto_minor_version_upgrade=None, wait=None, region=None, key=None, keyid=None, profile=None): ''' Create a cache cluster. CLI example:: salt myminion boto_elasticache.create myelasticache 1 redis cache.t1.micro cache_security_group_names='["myelasticachesg"]' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.create_cache_cluster( name, num_cache_nodes, cache_node_type, engine, replication_group_id, engine_version, cache_parameter_group_name, cache_subnet_group_name, cache_security_group_names, security_group_ids, snapshot_arns, preferred_availability_zone, preferred_maintenance_window, port, notification_topic_arn, auto_minor_version_upgrade) if not wait: log.info('Created cache cluster %s.', name) return True while True: time.sleep(3) config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'available': return True log.info('Created cache cluster %s.', name) except boto.exception.BotoServerError as e: msg = 'Failed to create cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def delete(name, wait=False, 
region=None, key=None, keyid=None, profile=None): ''' Delete a cache cluster. CLI example:: salt myminion boto_elasticache.delete myelasticache ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: conn.delete_cache_cluster(name) if not wait: log.info('Deleted cache cluster %s.', name) return True while True: config = get_config(name, region, key, keyid, profile) if not config: return True if config['cache_cluster_status'] == 'deleting': return True time.sleep(2) log.info('Deleted cache cluster %s.', name) return True except boto.exception.BotoServerError as e: msg = 'Failed to delete cache cluster {0}.'.format(name) log.error(msg) log.debug(e) return False def create_cache_security_group(name, description, region=None, key=None, keyid=None, profile=None): ''' Create a cache security group. CLI example:: salt myminion boto_elasticache.create_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) created = conn.create_cache_security_group(name, description) if created: log.info('Created cache security group %s.', name) return True else: msg = 'Failed to create cache security group {0}.'.format(name) log.error(msg) return False def delete_cache_security_group(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cache security group. 
CLI example:: salt myminion boto_elasticache.delete_cache_security_group myelasticachesg 'My Cache Security Group' ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) deleted = conn.delete_cache_security_group(name) if deleted: log.info('Deleted cache security group %s.', name) return True else: msg = 'Failed to delete cache security group {0}.'.format(name) log.error(msg) return False def revoke_cache_security_group_ingress(name, ec2_security_group_name, ec2_security_group_owner_id, region=None, key=None, keyid=None, profile=None): ''' Revoke network ingress from an ec2 security group to a cache security group. CLI example:: salt myminion boto_elasticache.revoke_cache_security_group_ingress myelasticachesg myec2sg 879879 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: removed = conn.revoke_cache_security_group_ingress( name, ec2_security_group_name, ec2_security_group_owner_id) if removed: msg = 'Removed {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.info(msg) return True else: msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False except boto.exception.EC2ResponseError as e: log.debug(e) msg = 'Failed to remove {0} from cache security group {1}.' msg = msg.format(name, ec2_security_group_name) log.error(msg) return False
saltstack/salt
salt/output/__init__.py
try_printout
python
def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip()
Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L37-L56
[ "def get_printout(out, opts=None, **kwargs):\n '''\n Return a printer function\n '''\n if opts is None:\n opts = {}\n\n if 'output' in opts and opts['output'] != 'highstate':\n # new --out option, but don't choke when using --out=highstate at CLI\n # See Issue #29796 for more information.\n out = opts['output']\n\n # Handle setting the output when --static is passed.\n if not out and opts.get('static'):\n if opts.get('output'):\n out = opts['output']\n elif opts.get('fun', '').split('.')[0] == 'state':\n # --static doesn't have an output set at this point, but if we're\n # running a state function and \"out\" hasn't already been set, we\n # should set the out variable to \"highstate\". Otherwise state runs\n # are set to \"nested\" below. See Issue #44556 for more information.\n out = 'highstate'\n\n if out == 'text':\n out = 'txt'\n elif out is None or out == '':\n out = 'nested'\n if opts.get('progress', False):\n out = 'progress'\n\n opts.update(kwargs)\n if 'color' not in opts:\n def is_pipe():\n '''\n Check if sys.stdout is a pipe or not\n '''\n try:\n fileno = sys.stdout.fileno()\n except (AttributeError, io.UnsupportedOperation):\n fileno = -1 # sys.stdout is StringIO or fake\n return not os.isatty(fileno)\n\n if opts.get('force_color', False):\n opts['color'] = True\n elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows():\n opts['color'] = False\n else:\n opts['color'] = True\n else:\n if opts.get('force_color', False):\n opts['color'] = True\n elif opts.get('no_color', False) or salt.utils.platform.is_windows():\n opts['color'] = False\n else:\n pass\n\n outputters = salt.loader.outputters(opts)\n if out not in outputters:\n # Since the grains outputter was removed we don't need to fire this\n # error when old minions are asking for it\n if out != 'grains':\n log.error('Invalid outputter %s specified, falling back to nested', out)\n return outputters['nested']\n return outputters[out]\n" ]
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = 
display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. 
''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
get_progress
python
def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress)
Get the progress bar from the given outputter
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L59-L66
null
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is 
None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. 
''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
update_progress
python
def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter)
Update the progress iterator for the given outputter
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L69-L79
null
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) 
try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. 
''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
display_output
python
def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc
Print the passed data using the desired output
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L90-L132
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: 
disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n", "def try_printout(data, out, opts, **kwargs):\n '''\n Safely get the string to print out, try the configured outputter, then\n fall back to nested and then to raw\n '''\n try:\n printout = get_printout(out, opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n except (KeyError, AttributeError, TypeError):\n log.debug(traceback.format_exc())\n try:\n printout = get_printout('nested', opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n except (KeyError, AttributeError, TypeError):\n log.error('Nested output failed: ', exc_info=True)\n printout = get_printout('raw', opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n", "def print_cli(msg, retries=10, step=0.01):\n '''\n Wrapper around print() that suppresses tracebacks on broken pipes (i.e.\n when salt output is piped to less and less is stopped prematurely).\n '''\n while retries:\n try:\n try:\n print(msg)\n except UnicodeEncodeError:\n print(msg.encode('utf-8'))\n except IOError as exc:\n err = \"{0}\".format(exc)\n if exc.errno != errno.EPIPE:\n if (\n (\"temporarily unavailable\" in err or\n exc.errno in (errno.EAGAIN,)) and\n retries\n ):\n time.sleep(step)\n retries -= 1\n continue\n else:\n raise\n break\n" ]
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def 
progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter 
string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. ''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
get_printout
python
def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out]
Return a printer function
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L135-L198
null
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def 
progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. 
''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
out_format
python
def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs)
Return the formatted outputter string for the passed data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L201-L205
[ "def try_printout(data, out, opts, **kwargs):\n '''\n Safely get the string to print out, try the configured outputter, then\n fall back to nested and then to raw\n '''\n try:\n printout = get_printout(out, opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n except (KeyError, AttributeError, TypeError):\n log.debug(traceback.format_exc())\n try:\n printout = get_printout('nested', opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n except (KeyError, AttributeError, TypeError):\n log.error('Nested output failed: ', exc_info=True)\n printout = get_printout('raw', opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n" ]
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def 
progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. ''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
string_format
python
def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output)
Return the formatted outputter string, removing the ANSI escape sequences.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L208-L214
[ "def try_printout(data, out, opts, **kwargs):\n '''\n Safely get the string to print out, try the configured outputter, then\n fall back to nested and then to raw\n '''\n try:\n printout = get_printout(out, opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n except (KeyError, AttributeError, TypeError):\n log.debug(traceback.format_exc())\n try:\n printout = get_printout('nested', opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n except (KeyError, AttributeError, TypeError):\n log.error('Nested output failed: ', exc_info=True)\n printout = get_printout('raw', opts)(data, **kwargs)\n if printout is not None:\n return printout.rstrip()\n" ]
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def 
progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. ''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />') def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
html_format
python
def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. ''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />')
Return the formatted string as HTML.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L217-L222
[ "def string_format(data, out, opts=None, **kwargs):\n '''\n Return the formatted outputter string, removing the ANSI escape sequences.\n '''\n raw_output = try_printout(data, out, opts, **kwargs)\n ansi_escape = re.compile(r'\\x1b[^m]*m')\n return ansi_escape.sub('', raw_output)\n" ]
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def 
progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
saltstack/salt
salt/output/__init__.py
strip_esc_sequence
python
def strip_esc_sequence(txt): ''' Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands ''' if isinstance(txt, six.string_types): try: return txt.replace('\033', '?') except UnicodeDecodeError: return txt.replace(str('\033'), str('?')) # future lint: disable=blacklisted-function else: return txt
Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings from writing their own terminal manipulation commands
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/__init__.py#L225-L236
null
# -*- coding: utf-8 -*- ''' Used to manage the outputter system. This package is the modular system used for managing outputters. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import logging import io import os import re import sys import traceback # Import Salt libs import salt.loader import salt.utils.files import salt.utils.platform import salt.utils.stringutils # Import 3rd-party libs from salt.ext import six # Are you really sure !!! # dealing with unicode is not as simple as setting defaultencoding # which can break other python modules imported by salt in bad ways... # reloading sys is not either a good idea... # reload(sys) # sys.setdefaultencoding('utf-8') log = logging.getLogger(__name__) def try_printout(data, out, opts, **kwargs): ''' Safely get the string to print out, try the configured outputter, then fall back to nested and then to raw ''' try: printout = get_printout(out, opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.debug(traceback.format_exc()) try: printout = get_printout('nested', opts)(data, **kwargs) if printout is not None: return printout.rstrip() except (KeyError, AttributeError, TypeError): log.error('Nested output failed: ', exc_info=True) printout = get_printout('raw', opts)(data, **kwargs) if printout is not None: return printout.rstrip() def get_progress(opts, out, progress): ''' Get the progress bar from the given outputter ''' return salt.loader.raw_mod(opts, out, 'rawmodule', mod='output')['{0}.progress_iter'.format(out)](progress) def update_progress(opts, progress, progress_iter, out): ''' Update the progress iterator for the given outputter ''' # Look up the outputter try: progress_outputter = salt.loader.outputters(opts)[out] except KeyError: # Outputter is not loaded log.warning('Progress outputter not available.') return False progress_outputter(progress, progress_iter) def 
progress_end(progress_iter): try: progress_iter.stop() except Exception: pass return None def display_output(data, out=None, opts=None, **kwargs): ''' Print the passed data using the desired output ''' if opts is None: opts = {} display_data = try_printout(data, out, opts, **kwargs) output_filename = opts.get('output_file', None) log.trace('data = %s', data) try: # output filename can be either '' or None if output_filename: if not hasattr(output_filename, 'write'): ofh = salt.utils.files.fopen(output_filename, 'a') # pylint: disable=resource-leakage fh_opened = True else: # Filehandle/file-like object ofh = output_filename fh_opened = False try: fdata = display_data if isinstance(fdata, six.text_type): try: fdata = fdata.encode('utf-8') except (UnicodeDecodeError, UnicodeEncodeError): # try to let the stream write # even if we didn't encode it pass if fdata: ofh.write(salt.utils.stringutils.to_str(fdata)) ofh.write('\n') finally: if fh_opened: ofh.close() return if display_data: salt.utils.stringutils.print_cli(display_data) except IOError as exc: # Only raise if it's NOT a broken pipe if exc.errno != errno.EPIPE: raise exc def get_printout(out, opts=None, **kwargs): ''' Return a printer function ''' if opts is None: opts = {} if 'output' in opts and opts['output'] != 'highstate': # new --out option, but don't choke when using --out=highstate at CLI # See Issue #29796 for more information. out = opts['output'] # Handle setting the output when --static is passed. if not out and opts.get('static'): if opts.get('output'): out = opts['output'] elif opts.get('fun', '').split('.')[0] == 'state': # --static doesn't have an output set at this point, but if we're # running a state function and "out" hasn't already been set, we # should set the out variable to "highstate". Otherwise state runs # are set to "nested" below. See Issue #44556 for more information. 
out = 'highstate' if out == 'text': out = 'txt' elif out is None or out == '': out = 'nested' if opts.get('progress', False): out = 'progress' opts.update(kwargs) if 'color' not in opts: def is_pipe(): ''' Check if sys.stdout is a pipe or not ''' try: fileno = sys.stdout.fileno() except (AttributeError, io.UnsupportedOperation): fileno = -1 # sys.stdout is StringIO or fake return not os.isatty(fileno) if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or is_pipe() or salt.utils.platform.is_windows(): opts['color'] = False else: opts['color'] = True else: if opts.get('force_color', False): opts['color'] = True elif opts.get('no_color', False) or salt.utils.platform.is_windows(): opts['color'] = False else: pass outputters = salt.loader.outputters(opts) if out not in outputters: # Since the grains outputter was removed we don't need to fire this # error when old minions are asking for it if out != 'grains': log.error('Invalid outputter %s specified, falling back to nested', out) return outputters['nested'] return outputters[out] def out_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string for the passed data ''' return try_printout(data, out, opts, **kwargs) def string_format(data, out, opts=None, **kwargs): ''' Return the formatted outputter string, removing the ANSI escape sequences. ''' raw_output = try_printout(data, out, opts, **kwargs) ansi_escape = re.compile(r'\x1b[^m]*m') return ansi_escape.sub('', raw_output) def html_format(data, out, opts=None, **kwargs): ''' Return the formatted string as HTML. ''' ansi_escaped_string = string_format(data, out, opts, **kwargs) return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />')
saltstack/salt
salt/utils/dns.py
_tree
python
def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res
Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L134-L166
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, 
recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False 
else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param 
name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_data2rec
python
def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) ))
schema = OrderedDict({ 'prio': int, 'weight': int, 'port': _to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25, 'name': 'myawesome.nl'}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L195-L228
[ "def _cast(rec_data, rec_cast):\n if isinstance(rec_cast, dict):\n rec_data = type(rec_cast.keys()[0])(rec_data)\n res = rec_cast[rec_data]\n return res\n elif isinstance(rec_cast, (list, tuple)):\n return RFC.validate(rec_data, rec_cast)\n else:\n return rec_cast(rec_data)\n", "def keys(self):\n 'od.keys() -> list of keys in od'\n return list(self)\n", "def items(self):\n 'od.items() -> list of (key, value) pairs in od'\n return [(key, self[key]) for key in self]\n", "def values(self):\n 'od.values() -> list of values in od'\n return [self[key] for key in self]\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): 
recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, 
servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query 
:param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param 
server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_lookup_dig
python
def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None):
    '''
    Resolve a name by shelling out to the ``dig`` utility

    :param name: Name of record to search
    :param rdtype: DNS record type
    :param timeout: server response timeout
    :param servers: [] of servers to use
    :return: [] of records or False if error
    '''
    # Assemble the dig command line; every fragment keeps a trailing space
    # so the final '{0} {1}' join stays well-formed.
    dig_cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype)
    if servers:
        for srv in servers:
            dig_cmd += '@{0} '.format(srv)
    if timeout is not None:
        if servers:
            # dig applies +time per server; split the overall budget evenly
            wait = int(float(timeout) / len(servers))
        else:
            wait = int(timeout)
        dig_cmd += '+time={0} '.format(wait)
    if secure:
        dig_cmd += '+dnssec +adflag '

    cmd_res = __salt__['cmd.run_all'](
        '{0} {1}'.format(dig_cmd, name), python_shell=False, output_loglevel='quiet')

    if 'ignoring invalid type' in cmd_res['stderr']:
        raise ValueError('Invalid DNS type {}'.format(rdtype))
    if cmd_res['retcode'] != 0:
        log.warning(
            'dig returned (%s): %s',
            cmd_res['retcode'], cmd_res['stderr'].strip(string.whitespace + ';')
        )
        return False
    if not cmd_res['stdout']:
        return []

    validated = False
    res = []
    for answer in cmd_res['stdout'].splitlines():
        # +noall +answer +nocl +nottl leaves: <name> <type> <rdata>
        _, rtype, rdata = answer.split(None, 2)
        if rtype == 'RRSIG':
            # presence of a signature record marks the answer as DNSSEC-signed
            validated = True
        elif rtype == 'CNAME' and rdtype != 'CNAME':
            # skip intermediate CNAMEs unless CNAMEs were asked for
            pass
        else:
            res.append(_data_clean(rdata))

    if secure and res and not validated:
        # secure lookups must not return unvalidated data
        return False
    return res
Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L271-L319
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = 
__salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type 
{}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], 
cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_lookup_drill
python
def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: pass if res and secure and not validated: return False else: return res
Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L322-L375
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise 
ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', 
cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_lookup_gai
python
def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False
Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L378-L401
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): 
return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or 
name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_lookup_host
python
def _lookup_host(name, rdtype, timeout=None, server=None):
    '''
    Resolve a DNS record by shelling out to the host(1) utility

    :param name: name of the record to look up
    :param rdtype: DNS record type (e.g. A, AAAA, MX)
    :param timeout: seconds to wait for a server response
    :param server: specific DNS server to query
    :return: [] of record data, [] when the name has no such record,
             or False on lookup error
    '''
    # Assemble the command line piece by piece; the trailing spaces keep
    # the option/argument separation intact.
    cmd = 'host -t {0} '.format(rdtype)
    if timeout:
        cmd += '-W {0} '.format(int(timeout))
    cmd += name
    if server is not None:
        cmd += ' {0}'.format(server)

    output = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet')

    if 'invalid type' in output['stderr']:
        raise ValueError('Invalid DNS type {}'.format(rdtype))
    if output['retcode'] != 0:
        log.warning('host returned (%s): %s', output['retcode'], output['stderr'])
        return False
    if 'has no' in output['stdout']:
        return []

    # When an explicit server was queried, host prints a header section
    # first; only the final blank-line-separated chunk holds the answers.
    stdout = output['stdout']
    if server is not None:
        stdout = stdout.split('\n\n')[-1]

    records = []
    for line in stdout.splitlines():
        # Skip CNAME indirection lines unless CNAMEs were asked for
        if 'is an alias' in line and rdtype != 'CNAME':
            continue
        # Drop the "<name> has <type>" lead-in (first three tokens)
        data = line.split(' ', 3)[-1]
        for lead in ('record', 'address', 'handled by', 'alias for'):
            if data.startswith(lead):
                data = data[len(lead) + 1:]
                break
        records.append(_data_clean(data))

    return records
Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L404-L443
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = 
__salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_lookup_dnspython
python
def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None):
    '''
    Resolve a DNS record with the dnspython library

    :param name: name of the record to look up
    :param rdtype: DNS record type
    :param timeout: total query lifetime in seconds
    :param servers: [] of nameserver(s) to try in order
    :param secure: request DNSSEC records (sets the DO EDNS flag)
    :return: [] of record data or False on lookup error
    '''
    resolver = dns.resolver.Resolver()

    if timeout is not None:
        # lifetime bounds the whole query, retries included
        resolver.lifetime = float(timeout)
    if servers:
        resolver.nameservers = servers
    if secure:
        # Raise the DNSSEC-OK bit in the EDNS flags
        resolver.ednsflags += dns.flags.DO

    try:
        answers = resolver.query(name, rdtype, raise_on_no_answer=False)
    except dns.rdatatype.UnknownRdatatype:
        raise ValueError('Invalid DNS type {}'.format(rdtype))
    except (dns.resolver.NXDOMAIN,
            dns.resolver.YXDOMAIN,
            dns.resolver.NoNameservers,
            dns.exception.Timeout):
        return False

    return [_data_clean(answer.to_text()) for answer in answers]
Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L446-L474
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type 
def lookup(
        name,
        rdtype,
        method=None,
        servers=None,
        timeout=None,
        walk=False,
        walk_tld=False,
        secure=None
):
    '''
    Lookup DNS records and return their data

    :param name: name to lookup
    :param rdtype: DNS record type
    :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)
    :param servers: (list of) server(s) to try in-order
    :param timeout: query timeout or a valiant approximation of that
    :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
    :param walk_tld: Include the final domain in the walk
    :param secure: return only DNSSEC secured responses
    :return: [] of record data, or False when no resolver/answer is available
    '''
    # opts = __opts__.get('dns', {})
    opts = {}
    method = method or opts.get('method', 'auto')
    secure = secure or opts.get('secure', None)
    servers = servers or opts.get('servers', None)
    timeout = timeout or opts.get('timeout', False)
    rdtype = rdtype.upper()

    # Candidate resolvers in order of preference; the third field is the
    # viability test for each one under the requested options.
    # pylint: disable=bad-whitespace,multiple-spaces-before-keyword
    query_methods = (
        ('gai',       _lookup_gai,       not any((rdtype not in ('A', 'AAAA'), servers, secure))),
        ('dnspython', _lookup_dnspython, HAS_DNSPYTHON),
        ('dig',       _lookup_dig,       HAS_DIG),
        ('drill',     _lookup_drill,     HAS_DRILL),
        ('host',      _lookup_host,      HAS_HOST and not secure),
        ('nslookup',  _lookup_nslookup,  HAS_NSLOOKUP and not secure),
    )
    # pylint: enable=bad-whitespace,multiple-spaces-before-keyword

    try:
        if method == 'auto':
            # The first one not to bork on the conditions becomes the function
            method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest))
        else:
            # The first one not to bork on the conditions becomes the function. And the name must match.
            resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest))
    except StopIteration:
        # Bugfix: the first two placeholders are name/rdtype and the third
        # is the method; the original passed (method, rdtype, name).
        log.error(
            'Unable to lookup %s/%s: Resolver method %s invalid, unsupported '
            'or unable to perform query', name, rdtype, method
        )
        return False

    res_kwargs = {
        'rdtype': rdtype,
    }

    if servers:
        if not isinstance(servers, (list, tuple)):
            servers = [servers]
        if method in ('dnspython', 'dig', 'drill'):
            # These resolvers handle multiple servers natively
            res_kwargs['servers'] = servers
        else:
            # Split the total timeout across the servers and wrap the
            # resolver so each server is tried in turn until one answers.
            if timeout:
                timeout /= len(servers)

            # Inject a wrapper for multi-server behaviour
            def _multi_srvr(resolv_func):
                @functools.wraps(resolv_func)
                def _wrapper(**res_kwargs):
                    for server in servers:
                        s_res = resolv_func(server=server, **res_kwargs)
                        if s_res:
                            return s_res
                return _wrapper
            resolver = _multi_srvr(resolver)

    if not walk:
        name = [name]
    else:
        idx = 0
        if rdtype in ('SRV', 'TLSA'):  # The only RRs I know that have 2 name components
            idx = name.find('.') + 1
            idx = name.find('.', idx) + 1
        domain = name[idx:]
        rname = name[0:idx]
        name = _tree(domain, walk_tld)
        if walk == 'name':
            name = [rname + domain for domain in name]

        if timeout:
            timeout /= len(name)

    if secure:
        res_kwargs['secure'] = secure
    if timeout:
        res_kwargs['timeout'] = timeout

    # Walk up the tree (or just query the single name) until an answer shows
    for rname in name:
        res = resolver(name=rname, **res_kwargs)
        if res:
            return res

    return res
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
def srv_name(svc, proto='tcp', domain=None):
    '''
    Generate SRV record name

    :param svc: ldap, 389 etc
    :param proto: tcp, udp, sctp etc.
    :param domain: name to append (optional)
    :return: SRV record name, e.g. _ldap._tcp.example.com
    '''
    proto = RFC.validate(proto, RFC.SRV_PROTO)

    if isinstance(svc, int) or svc.isdigit():
        svc = _to_port(svc)

    # Bugfix: without this normalization a missing domain was interpolated
    # as the literal string 'None' ('_ldap._tcpNone')
    domain = '.' + domain if domain else ''

    return '_{0}._{1}{2}'.format(svc, proto, domain)
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
_lookup_nslookup
python
def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res
Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L477-L543
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
def _data2rec(schema, rec_data):
    '''
    Cast a whitespace-separated rdata string into a dict (or single value)
    according to a schema of field-name -> caster pairs.

    Example::

        schema = OrderedDict((
            ('prio', int),
            ('weight', int),
            ('port', _to_port),
            ('name', str),
        ))

        rec_data = '10 20 25 myawesome.nl'

        res = {'prio': 10, 'weight': 20, 'port': 25, 'name': 'myawesome.nl'}

    :param schema: OrderedDict mapping field names to casters (see _cast)
    :param rec_data: rdata string to parse
    :raises ValueError: when the data does not fit the schema
    '''
    try:
        rec_fields = rec_data.split(' ')
        # spaces in digest fields are allowed
        # (extra fields are glued back onto the last schema field)
        assert len(rec_fields) >= len(schema)
        if len(rec_fields) > len(schema):
            cutoff = len(schema) - 1
            rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])]

        if len(schema) == 1:
            # Single-field schemas collapse to a bare value
            res = _cast(rec_fields[0], next(iter(schema.values())))
        else:
            res = dict((
                (field_name, _cast(rec_field, rec_cast))
                for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)
            ))
        return res
    except (AssertionError, AttributeError, TypeError, ValueError) as e:
        # Anything the casters throw is normalized to a ValueError
        raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format(
            rec_data, e, ' '.join(schema.keys())
        ))
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
lookup
python
def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res
Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L546-L654
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: 
res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: 
DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
query
python
def query(
        name,
        rdtype,
        method=None,
        servers=None,
        timeout=None,
        walk=False,
        walk_tld=False,
        secure=None
):
    '''
    Query DNS for information.
    Where `lookup()` returns raw record data, `query()` interprets that data
    and returns its parsed results.

    :param name: name to lookup
    :param rdtype: DNS record type
    :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default)
    :param servers: (list of) server(s) to try in-order
    :param timeout: query timeout or a valiant approximation of that
    :param secure: return only DNSSEC secured response
    :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.
    :param walk_tld: Include the top-level domain in the walk
    :return: [] of records
    '''
    rdtype = rdtype.upper()

    # Everything lookup() needs besides the name & record type
    lookup_args = dict(
        method=method,
        servers=servers,
        timeout=timeout,
        walk=walk,
        walk_tld=walk_tld,
        secure=secure,
    )

    # PTR queries operate on the reverse-pointer name
    if rdtype == 'PTR' and not name.endswith('arpa'):
        name = ptr_name(name)

    if rdtype != 'SPF':
        qres = lookup(name, rdtype, **lookup_args)
    else:
        # 'SPF' has become a regular 'TXT' again; prefer matching TXT
        # answers, fall back to the legacy SPF RR type if none are found
        qres = [answer for answer in lookup(name, 'TXT', **lookup_args)
                if answer.startswith('v=spf')]
        if not qres:
            qres = lookup(name, rdtype, **lookup_args)

    # Record parsers keyed by record type
    rec_map = {
        'A': a_rec,
        'AAAA': aaaa_rec,
        'CAA': caa_rec,
        'MX': mx_rec,
        'SOA': soa_rec,
        'SPF': spf_rec,
        'SRV': srv_rec,
        'SSHFP': sshfp_rec,
        'TLSA': tlsa_rec,
    }

    parser = rec_map.get(rdtype)
    if not qres or parser is None:
        # Nothing to parse (error/empty result) or no parser for this
        # type; hand the raw lookup result back unchanged
        return qres

    if rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'):
        # Per-record parsers: parse each answer individually
        return [parser(answer) for answer in qres]
    elif rdtype in ('SOA', 'SPF'):
        # Single-record types: only the first answer is meaningful
        return parser(qres[0])
    else:
        # Grouping parsers (CAA, MX, SRV) take the whole answer set
        return parser(qres)
Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return its results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the top-level domain in the walk :return: [] of records
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L657-L723
[ "def lookup(\n name,\n rdtype,\n method=None,\n servers=None,\n timeout=None,\n walk=False,\n walk_tld=False,\n secure=None\n):\n '''\n Lookup DNS records and return their data\n\n :param name: name to lookup\n :param rdtype: DNS record type\n :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)\n :param servers: (list of) server(s) to try in-order\n :param timeout: query timeout or a valiant approximation of that\n :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.\n :param walk_tld: Include the final domain in the walk\n :param secure: return only DNSSEC secured responses\n :return: [] of record data\n '''\n # opts = __opts__.get('dns', {})\n opts = {}\n method = method or opts.get('method', 'auto')\n secure = secure or opts.get('secure', None)\n servers = servers or opts.get('servers', None)\n timeout = timeout or opts.get('timeout', False)\n\n rdtype = rdtype.upper()\n\n # pylint: disable=bad-whitespace,multiple-spaces-before-keyword\n query_methods = (\n ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))),\n ('dnspython', _lookup_dnspython, HAS_DNSPYTHON),\n ('dig', _lookup_dig, HAS_DIG),\n ('drill', _lookup_drill, HAS_DRILL),\n ('host', _lookup_host, HAS_HOST and not secure),\n ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure),\n )\n # pylint: enable=bad-whitespace,multiple-spaces-before-keyword\n\n try:\n if method == 'auto':\n # The first one not to bork on the conditions becomes the function\n method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest))\n else:\n # The first one not to bork on the conditions becomes the function. 
And the name must match.\n resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest))\n except StopIteration:\n log.error(\n 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported '\n 'or unable to perform query', method, rdtype, name\n )\n return False\n\n res_kwargs = {\n 'rdtype': rdtype,\n }\n\n if servers:\n if not isinstance(servers, (list, tuple)):\n servers = [servers]\n if method in ('dnspython', 'dig', 'drill'):\n res_kwargs['servers'] = servers\n else:\n if timeout:\n timeout /= len(servers)\n\n # Inject a wrapper for multi-server behaviour\n def _multi_srvr(resolv_func):\n @functools.wraps(resolv_func)\n def _wrapper(**res_kwargs):\n for server in servers:\n s_res = resolv_func(server=server, **res_kwargs)\n if s_res:\n return s_res\n return _wrapper\n resolver = _multi_srvr(resolver)\n\n if not walk:\n name = [name]\n else:\n idx = 0\n if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components\n idx = name.find('.') + 1\n idx = name.find('.', idx) + 1\n domain = name[idx:]\n rname = name[0:idx]\n\n name = _tree(domain, walk_tld)\n if walk == 'name':\n name = [rname + domain for domain in name]\n\n if timeout:\n timeout /= len(name)\n\n if secure:\n res_kwargs['secure'] = secure\n if timeout:\n res_kwargs['timeout'] = timeout\n\n for rname in name:\n res = resolver(name=rname, **res_kwargs)\n if res:\n return res\n\n return res\n", "def ptr_name(rdata):\n '''\n Return PTR name of given IP\n :param rdata: IP address\n :return: PTR record name\n '''\n try:\n return ipaddress.ip_address(rdata).reverse_pointer\n except ValueError:\n log.error(\n 'Unable to generate PTR record; %s is not a valid IP address',\n rdata\n )\n return False\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = 
OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = 
OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
host
python
def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res
Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup()
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L726-L747
[ "def lookup(\n name,\n rdtype,\n method=None,\n servers=None,\n timeout=None,\n walk=False,\n walk_tld=False,\n secure=None\n):\n '''\n Lookup DNS records and return their data\n\n :param name: name to lookup\n :param rdtype: DNS record type\n :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default)\n :param servers: (list of) server(s) to try in-order\n :param timeout: query timeout or a valiant approximation of that\n :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.\n :param walk_tld: Include the final domain in the walk\n :param secure: return only DNSSEC secured responses\n :return: [] of record data\n '''\n # opts = __opts__.get('dns', {})\n opts = {}\n method = method or opts.get('method', 'auto')\n secure = secure or opts.get('secure', None)\n servers = servers or opts.get('servers', None)\n timeout = timeout or opts.get('timeout', False)\n\n rdtype = rdtype.upper()\n\n # pylint: disable=bad-whitespace,multiple-spaces-before-keyword\n query_methods = (\n ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))),\n ('dnspython', _lookup_dnspython, HAS_DNSPYTHON),\n ('dig', _lookup_dig, HAS_DIG),\n ('drill', _lookup_drill, HAS_DRILL),\n ('host', _lookup_host, HAS_HOST and not secure),\n ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure),\n )\n # pylint: enable=bad-whitespace,multiple-spaces-before-keyword\n\n try:\n if method == 'auto':\n # The first one not to bork on the conditions becomes the function\n method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest))\n else:\n # The first one not to bork on the conditions becomes the function. 
And the name must match.\n resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest))\n except StopIteration:\n log.error(\n 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported '\n 'or unable to perform query', method, rdtype, name\n )\n return False\n\n res_kwargs = {\n 'rdtype': rdtype,\n }\n\n if servers:\n if not isinstance(servers, (list, tuple)):\n servers = [servers]\n if method in ('dnspython', 'dig', 'drill'):\n res_kwargs['servers'] = servers\n else:\n if timeout:\n timeout /= len(servers)\n\n # Inject a wrapper for multi-server behaviour\n def _multi_srvr(resolv_func):\n @functools.wraps(resolv_func)\n def _wrapper(**res_kwargs):\n for server in servers:\n s_res = resolv_func(server=server, **res_kwargs)\n if s_res:\n return s_res\n return _wrapper\n resolver = _multi_srvr(resolver)\n\n if not walk:\n name = [name]\n else:\n idx = 0\n if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components\n idx = name.find('.') + 1\n idx = name.find('.', idx) + 1\n domain = name[idx:]\n rname = name[0:idx]\n\n name = _tree(domain, walk_tld)\n if walk == 'name':\n name = [rname + domain for domain in name]\n\n if timeout:\n timeout /= len(name)\n\n if secure:\n res_kwargs['secure'] = secure\n if timeout:\n res_kwargs['timeout'] = timeout\n\n for rname in name:\n res = resolver(name=rname, **res_kwargs)\n if res:\n return res\n\n return res\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in 
shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = 
mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' + domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: 
TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') 
port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. 
for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
caa_rec
python
def caa_rec(rdatas):
    '''
    Validate and parse DNS record data for a CAA record

    :param rdatas: DNS record data (a single string or a list of them)
    :return: dict of CAA fields, grouped by tag
    '''
    schema = OrderedDict((
        ('flags', lambda flag: ['critical'] if int(flag) > 0 else []),
        ('tag', RFC.CAA_TAGS),
        ('value', lambda val: val.strip('\',"'))
    ))

    grouped = _data2rec_group(schema, rdatas, 'tag')

    # issue/issuewild values may carry "name;key=val ..." parameters;
    # unpack those into {name: {key: val}} mappings in place
    for tag in ('issue', 'issuewild'):
        entries = grouped.get(tag, False)
        if not entries:
            continue

        for pos, entry in enumerate(entries):
            if ';' not in entry:
                continue
            name, param_str = entry.split(';', 1)
            params = dict(param.split('=') for param in shlex.split(param_str))
            entries[pos] = {name: params}

    return grouped
Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L774-L799
[ "def _data2rec_group(schema, recs_data, group_key):\n if not isinstance(recs_data, (list, tuple)):\n recs_data = [recs_data]\n\n res = OrderedDict()\n\n try:\n for rdata in recs_data:\n rdata = _data2rec(schema, rdata)\n assert rdata and group_key in rdata\n\n idx = rdata.pop(group_key)\n if idx not in res:\n res[idx] = []\n\n if len(rdata) == 1:\n rdata = next(iter(rdata.values()))\n\n res[idx].append(rdata)\n return res\n except (AssertionError, ValueError) as e:\n raise ValueError('Unable to cast \"{0}\" as a group of \"{1}\": {2}'.format(\n ','.join(recs_data),\n ' '.join(schema.keys()),\n e\n ))\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS 
record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, 
prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' + domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = 
hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, 
tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. 
for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
ptr_name
python
def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False
Return PTR name of given IP :param rdata: IP address :return: PTR record name
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L825-L838
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, 
val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' + domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM 
format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None 
svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. 
The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
soa_rec
python
def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata)
Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L841-L856
[ "def _data2rec(schema, rec_data):\n '''\n schema = OrderedDict({\n 'prio': int,\n 'weight': int,\n 'port': to_port,\n 'name': str,\n })\n rec_data = '10 20 25 myawesome.nl'\n\n res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'}\n '''\n try:\n rec_fields = rec_data.split(' ')\n # spaces in digest fields are allowed\n assert len(rec_fields) >= len(schema)\n if len(rec_fields) > len(schema):\n cutoff = len(schema) - 1\n rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])]\n\n if len(schema) == 1:\n res = _cast(rec_fields[0], next(iter(schema.values())))\n else:\n res = dict((\n (field_name, _cast(rec_field, rec_cast))\n for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)\n ))\n return res\n except (AssertionError, AttributeError, TypeError, ValueError) as e:\n raise ValueError('Unable to cast \"{0}\" as \"{2}\": {1}'.format(\n rec_data,\n e,\n ' '.join(schema.keys())\n ))\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif 
'/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' + domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: 
:param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = 
svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. 
The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
spf_rec
python
def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get # if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res
Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L859-L911
null
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration 
file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
srv_data
python
def srv_data(target, port, prio=10, weight=10):
    '''
    Generate SRV record data

    :param target: hostname of the server providing the service
    :param port: port the service listens on
    :param prio: record priority (lower values are tried first)
    :param weight: relative weight among records of equal priority
    :return: DNS record data string in SRV field order: 'prio weight port target'
    '''
    # _rec2data() is a plain ' '.join(); stringify the numeric fields first so
    # int arguments (including the int defaults above) don't raise TypeError.
    return _rec2data(str(prio), str(weight), str(port), str(target))
Generate SRV record data :param target: :param port: :param prio: :param weight: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L914-L923
[ "def _rec2data(*rdata):\n return ' '.join(rdata)\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' + domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, 
usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, 
comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. 
# The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
srv_name
python
def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' + domain return '_{0}._{1}{2}'.format(svc, proto, domain)
Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L926-L940
[ "def _to_port(port):\n try:\n port = int(port)\n assert 1 <= port <= 65535\n return port\n except (ValueError, AssertionError):\n raise ValueError('Invalid port {0}'.format(port))\n", "def validate(lookup, ref, match=None):\n if lookup in ref:\n return lookup\n elif match == 'in':\n return [code for code, name in ref.items() if lookup in name][-1]\n else:\n # OrderedDicts only!(?)\n return ref.keys()[ref.values().index(lookup)]\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = 
RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in 
svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. 
for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
srv_rec
python
def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio')
Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L943-L955
[ "def _data2rec_group(schema, recs_data, group_key):\n if not isinstance(recs_data, (list, tuple)):\n recs_data = [recs_data]\n\n res = OrderedDict()\n\n try:\n for rdata in recs_data:\n rdata = _data2rec(schema, rdata)\n assert rdata and group_key in rdata\n\n idx = rdata.pop(group_key)\n if idx not in res:\n res[idx] = []\n\n if len(rdata) == 1:\n rdata = next(iter(rdata.values()))\n\n res[idx].append(rdata)\n return res\n except (AssertionError, ValueError) as e:\n raise ValueError('Unable to cast \"{0}\" as a group of \"{1}\": {2}'.format(\n ','.join(recs_data),\n ' '.join(schema.keys()),\n e\n ))\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide 
the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = 
salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. 
log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
sshfp_data
python
def sshfp_data(key_t, hash_t, pub):
    '''
    Generate an SSHFP record (RFC 4255)

    :param key_t: key algorithm; rsa/dsa/ecdsa/ed25519 (name or IANA code)
    :param hash_t: fingerprint hash; sha1/sha256 (name or IANA code)
    :param pub: the SSH public key, base64-encoded as it appears in authorized_keys
    :return: RDATA string 'algo fp_type fingerprint'
    '''
    # RFC.validate normalizes both fields to their numeric IANA codes
    key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in')
    hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH)

    # hashlib.new() needs the algorithm *name*, but hash_t is the numeric
    # code at this point; look the name back up, mirroring tlsa_data()
    hasher = hashlib.new(RFC.SSHFP_HASH[hash_t])
    hasher.update(
        base64.b64decode(pub)
    )
    ssh_fp = hasher.hexdigest()

    # _rec2data space-joins its arguments and expects strings; the IANA
    # codes are ints, so cast them explicitly
    return _rec2data(str(key_t), str(hash_t), ssh_fp)
Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L958-L974
[ "def _rec2data(*rdata):\n return ' '.join(rdata)\n", "def validate(lookup, ref, match=None):\n if lookup in ref:\n return lookup\n elif match == 'in':\n return [code for code, name in ref.items() if lookup in name][-1]\n else:\n # OrderedDicts only!(?)\n return ref.keys()[ref.values().index(lookup)]\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ 
prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop 
everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. 
log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
sshfp_rec
python
def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata)
Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L977-L989
[ "def _data2rec(schema, rec_data):\n '''\n schema = OrderedDict({\n 'prio': int,\n 'weight': int,\n 'port': to_port,\n 'name': str,\n })\n rec_data = '10 20 25 myawesome.nl'\n\n res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'}\n '''\n try:\n rec_fields = rec_data.split(' ')\n # spaces in digest fields are allowed\n assert len(rec_fields) >= len(schema)\n if len(rec_fields) > len(schema):\n cutoff = len(schema) - 1\n rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])]\n\n if len(schema) == 1:\n res = _cast(rec_fields[0], next(iter(schema.values())))\n else:\n res = dict((\n (field_name, _cast(rec_field, rec_cast))\n for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)\n ))\n return res\n except (AssertionError, AttributeError, TypeError, ValueError) as e:\n raise ValueError('Unable to cast \"{0}\" as \"{2}\": {1}'.format(\n rec_data,\n e,\n ' '.join(schema.keys())\n ))\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated 
results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: 
(directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. 
log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
tlsa_data
python
def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp)
Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L992-L1015
[ "def _rec2data(*rdata):\n return ' '.join(rdata)\n", "def validate(lookup, ref, match=None):\n if lookup in ref:\n return lookup\n elif match == 'in':\n return [code for code, name in ref.items() if lookup in name][-1]\n else:\n # OrderedDicts only!(?)\n return ref.keys()[ref.values().index(lookup)]\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return 
False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version 
= ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
tlsa_rec
python
def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata)
Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L1018-L1031
[ "def _data2rec(schema, rec_data):\n '''\n schema = OrderedDict({\n 'prio': int,\n 'weight': int,\n 'port': to_port,\n 'name': str,\n })\n rec_data = '10 20 25 myawesome.nl'\n\n res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'}\n '''\n try:\n rec_fields = rec_data.split(' ')\n # spaces in digest fields are allowed\n assert len(rec_fields) >= len(schema)\n if len(rec_fields) > len(schema):\n cutoff = len(schema) - 1\n rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])]\n\n if len(schema) == 1:\n res = _cast(rec_fields[0], next(iter(schema.values())))\n else:\n res = dict((\n (field_name, _cast(rec_field, rec_cast))\n for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields)\n ))\n return res\n except (AssertionError, AttributeError, TypeError, ValueError) as e:\n raise ValueError('Unable to cast \"{0}\" as \"{2}\": {1}'.format(\n rec_data,\n e,\n ' '.join(schema.keys())\n ))\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def service( svc, proto='tcp', domain=None, walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param 
secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = 
salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. 
log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
service
python
def service(
    svc,
    proto='tcp',
    domain=None,
    walk=False,
    secure=None
):
    '''
    Find an SRV service in a domain or its parents

    :param svc: service to find (ldap, 389, etc)
    :param proto: protocol the service talks (tcp, udp, etc)
    :param domain: domain to start search in
    :param walk: walk the parents if domain doesn't provide the service
    :param secure: only return DNSSEC-validated results
    :return: [
        [ prio1server1, prio1server2 ],
        [ prio2server1, prio2server2 ],
    ]
    (the servers will already be weighted according to the SRV rules)
    '''
    qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure)
    if not qres:
        # no SRV records found (or lookup failed)
        return False

    # qres is an OrderedDict of {priority: [records]} built in DNS-response
    # order, which is arbitrary. RFC 2782 requires clients to try priority
    # groups in ascending numeric order, so sort the groups explicitly before
    # applying the weighted ordering within each group.
    res = []
    for _, recs in sorted(qres.items()):
        res.append(_weighted_order(recs))
    return res
Find an SRV service in a domain or its parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L1034-L1061
[ "def query(\n name,\n rdtype,\n method=None,\n servers=None,\n timeout=None,\n walk=False,\n walk_tld=False,\n secure=None\n):\n '''\n Query DNS for information.\n Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results\n\n :param name: name to lookup\n :param rdtype: DNS record type\n :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default)\n :param servers: (list of) server(s) to try in-order\n :param timeout: query timeout or a valiant approximation of that\n :param secure: return only DNSSEC secured response\n :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'.\n :param walk_tld: Include the top-level domain in the walk\n :return: [] of records\n '''\n rdtype = rdtype.upper()\n qargs = {\n 'method': method,\n 'servers': servers,\n 'timeout': timeout,\n 'walk': walk,\n 'walk_tld': walk_tld,\n 'secure': secure\n }\n\n if rdtype == 'PTR' and not name.endswith('arpa'):\n name = ptr_name(name)\n\n if rdtype == 'SPF':\n # 'SPF' has become a regular 'TXT' again\n qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')]\n if not qres:\n qres = lookup(name, rdtype, **qargs)\n else:\n qres = lookup(name, rdtype, **qargs)\n\n rec_map = {\n 'A': a_rec,\n 'AAAA': aaaa_rec,\n 'CAA': caa_rec,\n 'MX': mx_rec,\n 'SOA': soa_rec,\n 'SPF': spf_rec,\n 'SRV': srv_rec,\n 'SSHFP': sshfp_rec,\n 'TLSA': tlsa_rec,\n }\n\n if not qres or rdtype not in rec_map:\n return qres\n elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'):\n res = [rec_map[rdtype](res) for res in qres]\n elif rdtype in ('SOA', 'SPF'):\n res = rec_map[rdtype](qres[0])\n else:\n res = rec_map[rdtype](qres)\n\n return res\n", "def srv_name(svc, proto='tcp', domain=None):\n '''\n Generate SRV record name\n :param svc: ldap, 389 etc\n :param proto: tcp, udp, sctp etc.\n :param domain: name to append\n :return:\n '''\n proto = RFC.validate(proto, RFC.SRV_PROTO)\n if 
isinstance(svc, int) or svc.isdigit():\n svc = _to_port(svc)\n\n if domain:\n domain = '.' + domain\n return '_{0}._{1}{2}'.format(svc, proto, domain)\n", "def _weighted_order(recs):\n res = []\n weights = [rec['weight'] for rec in recs]\n while weights:\n rnd = random.random() * sum(weights)\n for i, w in enumerate(weights):\n rnd -= w\n if rnd < 0:\n res.append(recs.pop(i)['name'])\n weights.pop(i)\n break\n\n return res\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def services(services_file='/etc/services'): 
''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not 
in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
services
python
def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res
Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], }
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L1064-L1125
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. 
for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
saltstack/salt
salt/utils/dns.py
parse_resolv
python
def parse_resolv(src='/etc/resolv.conf'): ''' Parse a resolver configuration file (traditionally /etc/resolv.conf) ''' nameservers = [] ip4_nameservers = [] ip6_nameservers = [] search = [] sortlist = [] domain = '' options = [] try: with salt.utils.files.fopen(src) as src_file: # pylint: disable=too-many-nested-blocks for line in src_file: line = salt.utils.stringutils.to_unicode(line).strip().split() try: (directive, arg) = (line[0].lower(), line[1:]) # Drop everything after # or ; (comments) arg = list(itertools.takewhile(lambda x: x[0] not in ('#', ';'), arg)) if directive == 'nameserver': addr = arg[0] try: ip_addr = ipaddress.ip_address(addr) version = ip_addr.version ip_addr = str(ip_addr) if ip_addr not in nameservers: nameservers.append(ip_addr) if version == 4 and ip_addr not in ip4_nameservers: ip4_nameservers.append(ip_addr) elif version == 6 and ip_addr not in ip6_nameservers: ip6_nameservers.append(ip_addr) except ValueError as exc: log.error('%s: %s', src, exc) elif directive == 'domain': domain = arg[0] elif directive == 'search': search = arg elif directive == 'sortlist': # A sortlist is specified by IP address netmask pairs. # The netmask is optional and defaults to the natural # netmask of the net. The IP address and optional # network pairs are separated by slashes. for ip_raw in arg: try: ip_net = ipaddress.ip_network(ip_raw) except ValueError as exc: log.error('%s: %s', src, exc) else: if '/' not in ip_raw: # No netmask has been provided, guess # the "natural" one if ip_net.version == 4: ip_addr = six.text_type(ip_net.network_address) # pylint: disable=protected-access mask = salt.utils.network.natural_ipv4_netmask(ip_addr) ip_net = ipaddress.ip_network( '{0}{1}'.format(ip_addr, mask), strict=False ) if ip_net.version == 6: # TODO pass if ip_net not in sortlist: sortlist.append(ip_net) elif directive == 'options': # Options allows certain internal resolver variables to # be modified. 
if arg[0] not in options: options.append(arg[0]) except IndexError: continue if domain and search: # The domain and search keywords are mutually exclusive. If more # than one instance of these keywords is present, the last instance # will override. log.debug( '%s: The domain and search keywords are mutually exclusive.', src ) return { 'nameservers': nameservers, 'ip4_nameservers': ip4_nameservers, 'ip6_nameservers': ip6_nameservers, 'sortlist': [ip.with_netmask for ip in sortlist], 'domain': domain, 'search': search, 'options': options } except IOError: return {}
Parse a resolver configuration file (traditionally /etc/resolv.conf)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dns.py#L1128-L1224
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n" ]
# -*- coding: utf-8 -*- ''' Compendium of generic DNS utilities # Examples: dns.lookup(name, rdtype, ...) dns.query(name, rdtype, ...) dns.srv_rec(data) dns.srv_data('my1.example.com', 389, prio=10, weight=100) dns.srv_name('ldap/tcp', 'example.com') ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import base64 import binascii import hashlib import itertools import logging import random import re import shlex import socket import ssl import string import functools # Import Salt libs import salt.utils.files import salt.utils.network import salt.utils.path import salt.utils.stringutils import salt.modules.cmdmod from salt._compat import ipaddress from salt.utils.odict import OrderedDict # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=redefined-builtin # Integrations try: import dns.resolver HAS_DNSPYTHON = True except ImportError: HAS_DNSPYTHON = False try: import tldextract HAS_TLDEXTRACT = True except ImportError: HAS_TLDEXTRACT = False HAS_DIG = salt.utils.path.which('dig') is not None DIG_OPTIONS = '+search +fail +noall +answer +nocl +nottl' HAS_DRILL = salt.utils.path.which('drill') is not None HAS_HOST = salt.utils.path.which('host') is not None HAS_NSLOOKUP = salt.utils.path.which('nslookup') is not None __salt__ = { 'cmd.run_all': salt.modules.cmdmod.run_all } log = logging.getLogger(__name__) class RFC(object): ''' Simple holding class for all RFC/IANA registered lists & standards ''' # https://tools.ietf.org/html/rfc6844#section-3 CAA_TAGS = ( 'issue', 'issuewild', 'iodef' ) # http://www.iana.org/assignments/dns-sshfp-rr-parameters/dns-sshfp-rr-parameters.xhtml SSHFP_ALGO = OrderedDict(( (1, 'rsa'), (2, 'dsa'), (3, 'ecdsa'), (4, 'ed25519'), )) SSHFP_HASH = OrderedDict(( (1, 'sha1'), (2, 'sha256'), )) # http://www.iana.org/assignments/dane-parameters/dane-parameters.xhtml TLSA_USAGE = OrderedDict(( (0, 'pkixta'), (1, 'pkixee'), (2, 'daneta'), (3, 
'daneee'), )) TLSA_SELECT = OrderedDict(( (0, 'cert'), (1, 'spki'), )) TLSA_MATCHING = OrderedDict(( (0, 'full'), (1, 'sha256'), (2, 'sha512'), )) SRV_PROTO = ( 'tcp', 'udp', 'sctp' ) @staticmethod def validate(lookup, ref, match=None): if lookup in ref: return lookup elif match == 'in': return [code for code, name in ref.items() if lookup in name][-1] else: # OrderedDicts only!(?) return ref.keys()[ref.values().index(lookup)] def _to_port(port): try: port = int(port) assert 1 <= port <= 65535 return port except (ValueError, AssertionError): raise ValueError('Invalid port {0}'.format(port)) def _tree(domain, tld=False): ''' Split out a domain in its parents Leverages tldextract to take the TLDs from publicsuffix.org or makes a valiant approximation of that :param domain: dc2.ams2.example.com :param tld: Include TLD in list :return: [ 'dc2.ams2.example.com', 'ams2.example.com', 'example.com'] ''' domain = domain.rstrip('.') assert '.' in domain, 'Provide a decent domain' if not tld: if HAS_TLDEXTRACT: tld = tldextract.extract(domain).suffix else: tld = re.search(r'((?:(?:ac|biz|com?|info|edu|gov|mil|name|net|n[oi]m|org)\.)?[^.]+)$', domain).group() log.info('Without tldextract, dns.util resolves the TLD of %s to %s', domain, tld) res = [domain] while True: idx = domain.find('.') if idx < 0: break domain = domain[idx + 1:] if domain == tld: break res.append(domain) return res def _weighted_order(recs): res = [] weights = [rec['weight'] for rec in recs] while weights: rnd = random.random() * sum(weights) for i, w in enumerate(weights): rnd -= w if rnd < 0: res.append(recs.pop(i)['name']) weights.pop(i) break return res def _cast(rec_data, rec_cast): if isinstance(rec_cast, dict): rec_data = type(rec_cast.keys()[0])(rec_data) res = rec_cast[rec_data] return res elif isinstance(rec_cast, (list, tuple)): return RFC.validate(rec_data, rec_cast) else: return rec_cast(rec_data) def _data2rec(schema, rec_data): ''' schema = OrderedDict({ 'prio': int, 'weight': int, 'port': 
to_port, 'name': str, }) rec_data = '10 20 25 myawesome.nl' res = {'prio': 10, 'weight': 20, 'port': 25 'name': 'myawesome.nl'} ''' try: rec_fields = rec_data.split(' ') # spaces in digest fields are allowed assert len(rec_fields) >= len(schema) if len(rec_fields) > len(schema): cutoff = len(schema) - 1 rec_fields = rec_fields[0:cutoff] + [''.join(rec_fields[cutoff:])] if len(schema) == 1: res = _cast(rec_fields[0], next(iter(schema.values()))) else: res = dict(( (field_name, _cast(rec_field, rec_cast)) for (field_name, rec_cast), rec_field in zip(schema.items(), rec_fields) )) return res except (AssertionError, AttributeError, TypeError, ValueError) as e: raise ValueError('Unable to cast "{0}" as "{2}": {1}'.format( rec_data, e, ' '.join(schema.keys()) )) def _data2rec_group(schema, recs_data, group_key): if not isinstance(recs_data, (list, tuple)): recs_data = [recs_data] res = OrderedDict() try: for rdata in recs_data: rdata = _data2rec(schema, rdata) assert rdata and group_key in rdata idx = rdata.pop(group_key) if idx not in res: res[idx] = [] if len(rdata) == 1: rdata = next(iter(rdata.values())) res[idx].append(rdata) return res except (AssertionError, ValueError) as e: raise ValueError('Unable to cast "{0}" as a group of "{1}": {2}'.format( ','.join(recs_data), ' '.join(schema.keys()), e )) def _rec2data(*rdata): return ' '.join(rdata) def _data_clean(data): data = data.strip(string.whitespace) if data.startswith(('"', '\'')) and data.endswith(('"', '\'')): return data[1:-1] else: return data def _lookup_dig(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dig to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'dig {0} -t {1} '.format(DIG_OPTIONS, rdtype) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) if timeout is not None: if servers: timeout = 
int(float(timeout) / len(servers)) else: timeout = int(timeout) cmd += '+time={0} '.format(timeout) if secure: cmd += '+dnssec +adflag ' cmd = __salt__['cmd.run_all']('{0} {1}'.format(cmd, name), python_shell=False, output_loglevel='quiet') if 'ignoring invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning( 'dig returned (%s): %s', cmd['retcode'], cmd['stderr'].strip(string.whitespace + ';') ) return False elif not cmd['stdout']: return [] validated = False res = [] for line in cmd['stdout'].splitlines(): _, rtype, rdata = line.split(None, 2) if rtype == 'CNAME' and rdtype != 'CNAME': continue elif rtype == 'RRSIG': validated = True continue res.append(_data_clean(rdata)) if res and secure and not validated: return False else: return res def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None): ''' Use drill to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: command return timeout :param servers: [] of servers to use :return: [] of records or False if error ''' cmd = 'drill ' if secure: cmd += '-D -o ad ' cmd += '{0} {1} '.format(rdtype, name) if servers: cmd += ''.join(['@{0} '.format(srv) for srv in servers]) cmd = __salt__['cmd.run_all']( cmd, timeout=timeout, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning('drill returned (%s): %s', cmd['retcode'], cmd['stderr']) return False lookup_res = iter(cmd['stdout'].splitlines()) validated = False res = [] try: line = '' while 'ANSWER SECTION' not in line: line = next(lookup_res) while True: line = next(lookup_res) line = line.strip() if not line or line.startswith(';;'): break l_type, l_rec = line.split(None, 4)[-2:] if l_type == 'CNAME' and rdtype != 'CNAME': continue elif l_type == 'RRSIG': validated = True continue elif l_type != rdtype: raise ValueError('Invalid DNS type {}'.format(rdtype)) res.append(_data_clean(l_rec)) except StopIteration: 
pass if res and secure and not validated: return False else: return res def _lookup_gai(name, rdtype, timeout=None): ''' Use Python's socket interface to lookup addresses :param name: Name of record to search :param rdtype: A or AAAA :param timeout: ignored :return: [] of addresses or False if error ''' try: sock_t = { 'A': socket.AF_INET, 'AAAA': socket.AF_INET6 }[rdtype] except KeyError: raise ValueError('Invalid DNS type {} for gai lookup'.format(rdtype)) if timeout: log.info('Ignoring timeout on gai resolver; fix resolv.conf to do that') try: addresses = [sock[4][0] for sock in socket.getaddrinfo(name, None, sock_t, 0, socket.SOCK_RAW)] return addresses except socket.gaierror: return False def _lookup_host(name, rdtype, timeout=None, server=None): ''' Use host to lookup addresses :param name: Name of record to search :param server: Server to query :param rdtype: DNS record type :param timeout: server response wait :return: [] of records or False if error ''' cmd = 'host -t {0} '.format(rdtype) if timeout: cmd += '-W {0} '.format(int(timeout)) cmd += name if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if 'invalid type' in cmd['stderr']: raise ValueError('Invalid DNS type {}'.format(rdtype)) elif cmd['retcode'] != 0: log.warning('host returned (%s): %s', cmd['retcode'], cmd['stderr']) return False elif 'has no' in cmd['stdout']: return [] res = [] _stdout = cmd['stdout'] if server is None else cmd['stdout'].split('\n\n')[-1] for line in _stdout.splitlines(): if rdtype != 'CNAME' and 'is an alias' in line: continue line = line.split(' ', 3)[-1] for prefix in ('record', 'address', 'handled by', 'alias for'): if line.startswith(prefix): line = line[len(prefix) + 1:] break res.append(_data_clean(line)) return res def _lookup_dnspython(name, rdtype, timeout=None, servers=None, secure=None): ''' Use dnspython to lookup addresses :param name: Name of record to search :param rdtype: DNS 
record type :param timeout: query timeout :param server: [] of server(s) to try in order :return: [] of records or False if error ''' resolver = dns.resolver.Resolver() if timeout is not None: resolver.lifetime = float(timeout) if servers: resolver.nameservers = servers if secure: resolver.ednsflags += dns.flags.DO try: res = [_data_clean(rr.to_text()) for rr in resolver.query(name, rdtype, raise_on_no_answer=False)] return res except dns.rdatatype.UnknownRdatatype: raise ValueError('Invalid DNS type {}'.format(rdtype)) except (dns.resolver.NXDOMAIN, dns.resolver.YXDOMAIN, dns.resolver.NoNameservers, dns.exception.Timeout): return False def _lookup_nslookup(name, rdtype, timeout=None, server=None): ''' Use nslookup to lookup addresses :param name: Name of record to search :param rdtype: DNS record type :param timeout: server response timeout :param server: server to query :return: [] of records or False if error ''' cmd = 'nslookup -query={0} {1}'.format(rdtype, name) if timeout is not None: cmd += ' -timeout={0}'.format(int(timeout)) if server is not None: cmd += ' {0}'.format(server) cmd = __salt__['cmd.run_all'](cmd, python_shell=False, output_loglevel='quiet') if cmd['retcode'] != 0: log.warning( 'nslookup returned (%s): %s', cmd['retcode'], cmd['stdout'].splitlines()[-1].strip(string.whitespace + ';') ) return False lookup_res = iter(cmd['stdout'].splitlines()) res = [] try: line = next(lookup_res) if 'unknown query type' in line: raise ValueError('Invalid DNS type {}'.format(rdtype)) while True: if name in line: break line = next(lookup_res) while True: line = line.strip() if not line or line.startswith('*'): break elif rdtype != 'CNAME' and 'canonical name' in line: name = line.split()[-1][:-1] line = next(lookup_res) continue elif rdtype == 'SOA': line = line.split('=') elif line.startswith('Name:'): line = next(lookup_res) line = line.split(':', 1) elif line.startswith(name): if '=' in line: line = line.split('=', 1) else: line = line.split(' ') 
res.append(_data_clean(line[-1])) line = next(lookup_res) except StopIteration: pass if rdtype == 'SOA': return [' '.join(res[1:])] else: return res def lookup( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Lookup DNS records and return their data :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), dnspython, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. :param walk_tld: Include the final domain in the walk :param secure: return only DNSSEC secured responses :return: [] of record data ''' # opts = __opts__.get('dns', {}) opts = {} method = method or opts.get('method', 'auto') secure = secure or opts.get('secure', None) servers = servers or opts.get('servers', None) timeout = timeout or opts.get('timeout', False) rdtype = rdtype.upper() # pylint: disable=bad-whitespace,multiple-spaces-before-keyword query_methods = ( ('gai', _lookup_gai, not any((rdtype not in ('A', 'AAAA'), servers, secure))), ('dnspython', _lookup_dnspython, HAS_DNSPYTHON), ('dig', _lookup_dig, HAS_DIG), ('drill', _lookup_drill, HAS_DRILL), ('host', _lookup_host, HAS_HOST and not secure), ('nslookup', _lookup_nslookup, HAS_NSLOOKUP and not secure), ) # pylint: enable=bad-whitespace,multiple-spaces-before-keyword try: if method == 'auto': # The first one not to bork on the conditions becomes the function method, resolver = next(((rname, rcb) for rname, rcb, rtest in query_methods if rtest)) else: # The first one not to bork on the conditions becomes the function. And the name must match. 
resolver = next((rcb for rname, rcb, rtest in query_methods if rname == method and rtest)) except StopIteration: log.error( 'Unable to lookup %s/%s: Resolver method %s invalid, unsupported ' 'or unable to perform query', method, rdtype, name ) return False res_kwargs = { 'rdtype': rdtype, } if servers: if not isinstance(servers, (list, tuple)): servers = [servers] if method in ('dnspython', 'dig', 'drill'): res_kwargs['servers'] = servers else: if timeout: timeout /= len(servers) # Inject a wrapper for multi-server behaviour def _multi_srvr(resolv_func): @functools.wraps(resolv_func) def _wrapper(**res_kwargs): for server in servers: s_res = resolv_func(server=server, **res_kwargs) if s_res: return s_res return _wrapper resolver = _multi_srvr(resolver) if not walk: name = [name] else: idx = 0 if rdtype in ('SRV', 'TLSA'): # The only RRs I know that have 2 name components idx = name.find('.') + 1 idx = name.find('.', idx) + 1 domain = name[idx:] rname = name[0:idx] name = _tree(domain, walk_tld) if walk == 'name': name = [rname + domain for domain in name] if timeout: timeout /= len(name) if secure: res_kwargs['secure'] = secure if timeout: res_kwargs['timeout'] = timeout for rname in name: res = resolver(name=rname, **res_kwargs) if res: return res return res def query( name, rdtype, method=None, servers=None, timeout=None, walk=False, walk_tld=False, secure=None ): ''' Query DNS for information. Where `lookup()` returns record data, `query()` tries to interpret the data and return it's results :param name: name to lookup :param rdtype: DNS record type :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default) :param servers: (list of) server(s) to try in-order :param timeout: query timeout or a valiant approximation of that :param secure: return only DNSSEC secured response :param walk: Walk the DNS upwards looking for the record type or name/recordtype if walk='name'. 
:param walk_tld: Include the top-level domain in the walk :return: [] of records ''' rdtype = rdtype.upper() qargs = { 'method': method, 'servers': servers, 'timeout': timeout, 'walk': walk, 'walk_tld': walk_tld, 'secure': secure } if rdtype == 'PTR' and not name.endswith('arpa'): name = ptr_name(name) if rdtype == 'SPF': # 'SPF' has become a regular 'TXT' again qres = [answer for answer in lookup(name, 'TXT', **qargs) if answer.startswith('v=spf')] if not qres: qres = lookup(name, rdtype, **qargs) else: qres = lookup(name, rdtype, **qargs) rec_map = { 'A': a_rec, 'AAAA': aaaa_rec, 'CAA': caa_rec, 'MX': mx_rec, 'SOA': soa_rec, 'SPF': spf_rec, 'SRV': srv_rec, 'SSHFP': sshfp_rec, 'TLSA': tlsa_rec, } if not qres or rdtype not in rec_map: return qres elif rdtype in ('A', 'AAAA', 'SSHFP', 'TLSA'): res = [rec_map[rdtype](res) for res in qres] elif rdtype in ('SOA', 'SPF'): res = rec_map[rdtype](qres[0]) else: res = rec_map[rdtype](qres) return res def host(name, ip4=True, ip6=True, **kwargs): ''' Return a list of addresses for name ip6: Return IPv6 addresses ip4: Return IPv4 addresses the rest is passed on to lookup() ''' res = {} if ip6: ip6 = lookup(name, 'AAAA', **kwargs) if ip6: res['ip6'] = ip6 if ip4: ip4 = lookup(name, 'A', **kwargs) if ip4: res['ip4'] = ip4 return res def a_rec(rdata): ''' Validate and parse DNS record data for an A record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv4Address), )) return _data2rec(rschema, rdata) def aaaa_rec(rdata): ''' Validate and parse DNS record data for an AAAA record :param rdata: DNS record data :return: { 'address': ip } ''' rschema = OrderedDict(( ('address', ipaddress.IPv6Address), )) return _data2rec(rschema, rdata) def caa_rec(rdatas): ''' Validate and parse DNS record data for a CAA record :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('flags', lambda flag: ['critical'] if int(flag) > 0 else []), ('tag', 
RFC.CAA_TAGS), ('value', lambda val: val.strip('\',"')) )) res = _data2rec_group(rschema, rdatas, 'tag') for tag in ('issue', 'issuewild'): tag_res = res.get(tag, False) if not tag_res: continue for idx, val in enumerate(tag_res): if ';' not in val: continue val, params = val.split(';', 1) params = dict(param.split('=') for param in shlex.split(params)) tag_res[idx] = {val: params} return res def mx_data(target, preference=10): ''' Generate MX record data :param target: server :param preference: preference number :return: DNS record data ''' return _rec2data(int(preference), target) def mx_rec(rdatas): ''' Validate and parse DNS record data for MX record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('preference', int), ('name', str), )) return _data2rec_group(rschema, rdatas, 'preference') def ptr_name(rdata): ''' Return PTR name of given IP :param rdata: IP address :return: PTR record name ''' try: return ipaddress.ip_address(rdata).reverse_pointer except ValueError: log.error( 'Unable to generate PTR record; %s is not a valid IP address', rdata ) return False def soa_rec(rdata): ''' Validate and parse DNS record data for SOA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('mname', str), ('rname', str), ('serial', int), ('refresh', int), ('retry', int), ('expire', int), ('minimum', int), )) return _data2rec(rschema, rdata) def spf_rec(rdata): ''' Validate and parse DNS record data for SPF record(s) :param rdata: DNS record data :return: dict w/fields ''' spf_fields = rdata.split(' ') if not spf_fields.pop(0).startswith('v=spf'): raise ValueError('Not an SPF record') res = OrderedDict() mods = set() for mech_spec in spf_fields: if mech_spec.startswith(('exp', 'redirect')): # It's a modifier mod, val = mech_spec.split('=', 1) if mod in mods: raise KeyError('Modifier {0} can only appear once'.format(mod)) mods.add(mod) continue # TODO: Should be in something intelligent like an SPF_get 
# if mod == 'exp': # res[mod] = lookup(val, 'TXT', **qargs) # continue # elif mod == 'redirect': # return query(val, 'SPF', **qargs) mech = {} if mech_spec[0] in ('+', '-', '~', '?'): mech['qualifier'] = mech_spec[0] mech_spec = mech_spec[1:] if ':' in mech_spec: mech_spec, val = mech_spec.split(':', 1) elif '/' in mech_spec: idx = mech_spec.find('/') mech_spec = mech_spec[0:idx] val = mech_spec[idx:] else: val = None res[mech_spec] = mech if not val: continue elif mech_spec in ('ip4', 'ip6'): val = ipaddress.ip_interface(val) assert val.version == int(mech_spec[-1]) mech['value'] = val return res def srv_data(target, port, prio=10, weight=10): ''' Generate SRV record data :param target: :param port: :param prio: :param weight: :return: ''' return _rec2data(prio, weight, port, target) def srv_name(svc, proto='tcp', domain=None): ''' Generate SRV record name :param svc: ldap, 389 etc :param proto: tcp, udp, sctp etc. :param domain: name to append :return: ''' proto = RFC.validate(proto, RFC.SRV_PROTO) if isinstance(svc, int) or svc.isdigit(): svc = _to_port(svc) if domain: domain = '.' 
+ domain return '_{0}._{1}{2}'.format(svc, proto, domain) def srv_rec(rdatas): ''' Validate and parse DNS record data for SRV record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('prio', int), ('weight', int), ('port', _to_port), ('name', str), )) return _data2rec_group(rschema, rdatas, 'prio') def sshfp_data(key_t, hash_t, pub): ''' Generate an SSHFP record :param key_t: rsa/dsa/ecdsa/ed25519 :param hash_t: sha1/sha256 :param pub: the SSH public key ''' key_t = RFC.validate(key_t, RFC.SSHFP_ALGO, 'in') hash_t = RFC.validate(hash_t, RFC.SSHFP_HASH) hasher = hashlib.new(hash_t) hasher.update( base64.b64decode(pub) ) ssh_fp = hasher.hexdigest() return _rec2data(key_t, hash_t, ssh_fp) def sshfp_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('algorithm', RFC.SSHFP_ALGO), ('fp_hash', RFC.SSHFP_HASH), ('fingerprint', lambda val: val.lower()) # resolvers are inconsistent on this one )) return _data2rec(rschema, rdata) def tlsa_data(pub, usage, selector, matching): ''' Generate a TLSA rec :param pub: Pub key in PEM format :param usage: :param selector: :param matching: :return: TLSA data portion ''' usage = RFC.validate(usage, RFC.TLSA_USAGE) selector = RFC.validate(selector, RFC.TLSA_SELECT) matching = RFC.validate(matching, RFC.TLSA_MATCHING) pub = ssl.PEM_cert_to_DER_cert(pub.strip()) if matching == 0: cert_fp = binascii.b2a_hex(pub) else: hasher = hashlib.new(RFC.TLSA_MATCHING[matching]) hasher.update( pub ) cert_fp = hasher.hexdigest() return _rec2data(usage, selector, matching, cert_fp) def tlsa_rec(rdata): ''' Validate and parse DNS record data for TLSA record(s) :param rdata: DNS record data :return: dict w/fields ''' rschema = OrderedDict(( ('usage', RFC.TLSA_USAGE), ('selector', RFC.TLSA_SELECT), ('matching', RFC.TLSA_MATCHING), ('pub', str) )) return _data2rec(rschema, rdata) def service( svc, proto='tcp', domain=None, 
walk=False, secure=None ): ''' Find an SRV service in a domain or it's parents :param svc: service to find (ldap, 389, etc) :param proto: protocol the service talks (tcp, udp, etc) :param domain: domain to start search in :param walk: walk the parents if domain doesn't provide the service :param secure: only return DNSSEC-validated results :return: [ [ prio1server1, prio1server2 ], [ prio2server1, prio2server2 ], ] (the servers will already be weighted according to the SRV rules) ''' qres = query(srv_name(svc, proto, domain), 'SRV', walk=walk, secure=secure) if not qres: return False res = [] for _, recs in qres.items(): res.append(_weighted_order(recs)) return res def services(services_file='/etc/services'): ''' Parse through system-known services :return: { 'svc': [ { 'port': port 'proto': proto, 'desc': comment }, ], } ''' res = {} with salt.utils.files.fopen(services_file, 'r') as svc_defs: for svc_def in svc_defs.readlines(): svc_def = salt.utils.stringutils.to_unicode(svc_def.strip()) if not svc_def or svc_def.startswith('#'): continue elif '#' in svc_def: svc_def, comment = svc_def.split('#', 1) comment = comment.strip() else: comment = None svc_def = svc_def.split() port, proto = svc_def.pop(1).split('/') port = int(port) for name in svc_def: svc_res = res.get(name, {}) pp_res = svc_res.get(port, False) if not pp_res: svc = { 'port': port, 'proto': proto, } if comment: svc['desc'] = comment svc_res[port] = svc else: curr_proto = pp_res['proto'] if isinstance(curr_proto, (list, tuple)): curr_proto.append(proto) else: pp_res['proto'] = [curr_proto, proto] curr_desc = pp_res.get('desc', False) if comment: if not curr_desc: pp_res['desc'] = comment elif comment != curr_desc: pp_res['desc'] = '{0}, {1}'.format(curr_desc, comment) res[name] = svc_res for svc, data in res.items(): if len(data) == 1: res[svc] = data.values().pop() continue else: res[svc] = list(data.values()) return res
saltstack/salt
salt/returners/__init__.py
get_returner_options
python
def get_returner_options(virtualname=None, ret=None, attrs=None, **kwargs): ''' Get the returner options from salt. :param str virtualname: The returner virtualname (as returned by __virtual__() :param ret: result of the module that ran. dict-like object May contain a `ret_config` key pointing to a string If a `ret_config` is specified, config options are read from:: value.virtualname.option If not, config options are read from:: value.virtualname.option :param attrs: options the returner wants to read :param __opts__: Optional dict-like object that contains a fallback config in case the param `__salt__` is not supplied. Defaults to empty dict. :param __salt__: Optional dict-like object that exposes the salt API. Defaults to empty dict. a) if __salt__ contains a 'config.option' configuration options, we infer the returner is being called from a state or module run -> config is a copy of the `config.option` function b) if __salt__ was not available, we infer that the returner is being called from the Salt scheduler, so we look for the configuration options in the param `__opts__` -> cfg is a copy for the __opts__ dictionary :param str profile_attr: Optional. If supplied, an overriding config profile is read from the corresponding key of `__salt__`. :param dict profile_attrs: Optional .. 
fixme:: only keys are read For each key in profile_attr, a value is read in the are used to fetch a value pointed by 'virtualname.%key' in the dict found thanks to the param `profile_attr` ''' ret_config = _fetch_ret_config(ret) attrs = attrs or {} profile_attr = kwargs.get('profile_attr', None) profile_attrs = kwargs.get('profile_attrs', None) defaults = kwargs.get('defaults', None) __salt__ = kwargs.get('__salt__', {}) __opts__ = kwargs.get('__opts__', {}) # select the config source cfg = __salt__.get('config.option', __opts__) # browse the config for relevant options, store them in a dict _options = dict( _options_browser( cfg, ret_config, defaults, virtualname, attrs, ) ) # override some values with relevant profile options _options.update( _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ) ) # override some values with relevant options from # keyword arguments passed via return_kwargs if ret and 'ret_kwargs' in ret: _options.update(ret['ret_kwargs']) return _options
Get the returner options from salt. :param str virtualname: The returner virtualname (as returned by __virtual__() :param ret: result of the module that ran. dict-like object May contain a `ret_config` key pointing to a string If a `ret_config` is specified, config options are read from:: value.virtualname.option If not, config options are read from:: value.virtualname.option :param attrs: options the returner wants to read :param __opts__: Optional dict-like object that contains a fallback config in case the param `__salt__` is not supplied. Defaults to empty dict. :param __salt__: Optional dict-like object that exposes the salt API. Defaults to empty dict. a) if __salt__ contains a 'config.option' configuration options, we infer the returner is being called from a state or module run -> config is a copy of the `config.option` function b) if __salt__ was not available, we infer that the returner is being called from the Salt scheduler, so we look for the configuration options in the param `__opts__` -> cfg is a copy for the __opts__ dictionary :param str profile_attr: Optional. If supplied, an overriding config profile is read from the corresponding key of `__salt__`. :param dict profile_attrs: Optional .. fixme:: only keys are read For each key in profile_attr, a value is read in the are used to fetch a value pointed by 'virtualname.%key' in the dict found thanks to the param `profile_attr`
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/__init__.py#L16-L107
[ "def _fetch_ret_config(ret):\n \"\"\"\n Fetches 'ret_config' if available.\n\n @see :func:`get_returner_options`\n \"\"\"\n if not ret:\n return None\n if 'ret_config' not in ret:\n return ''\n return six.text_type(ret['ret_config'])\n", "def _options_browser(cfg, ret_config, defaults, virtualname, options):\n \"\"\"\n Iterator generating all duples ```option name -> value```\n\n @see :func:`get_returner_options`\n \"\"\"\n\n for option in options:\n\n # default place for the option in the config\n value = _fetch_option(cfg, ret_config, virtualname, options[option])\n\n if value:\n yield option, value\n continue\n\n # Attribute not found, check for a default value\n if defaults:\n if option in defaults:\n log.info('Using default for %s %s', virtualname, option)\n yield option, defaults[option]\n continue\n\n # fallback (implicit else for all ifs)\n continue\n", "def _fetch_profile_opts(\n cfg, virtualname,\n __salt__,\n _options,\n profile_attr,\n profile_attrs\n ):\n \"\"\"\n Fetches profile specific options if applicable\n\n @see :func:`get_returner_options`\n\n :return: a options dict\n \"\"\"\n\n if (not profile_attr) or (profile_attr not in _options):\n return {}\n\n # Using a profile and it is in _options\n\n creds = {}\n profile = _options[profile_attr]\n if profile:\n log.info('Using profile %s', profile)\n\n if 'config.option' in __salt__:\n creds = cfg(profile)\n else:\n creds = cfg.get(profile)\n\n if not creds:\n return {}\n\n return dict(\n (\n pattr,\n creds.get('{0}.{1}'.format(virtualname, profile_attrs[pattr]))\n )\n for pattr in profile_attrs\n )\n" ]
# -*- coding: utf-8 -*- ''' Returners Directory :func:`get_returner_options` is a general purpose function that returners may use to fetch their configuration options. ''' from __future__ import absolute_import, print_function, unicode_literals import logging from salt.ext import six log = logging.getLogger(__name__) def _fetch_ret_config(ret): """ Fetches 'ret_config' if available. @see :func:`get_returner_options` """ if not ret: return None if 'ret_config' not in ret: return '' return six.text_type(ret['ret_config']) def _fetch_option(cfg, ret_config, virtualname, attr_name): """ Fetch a given option value from the config. @see :func:`get_returner_options` """ # c_cfg is a dictionary returned from config.option for # any options configured for this returner. if isinstance(cfg, dict): c_cfg = cfg else: c_cfg = cfg('{0}'.format(virtualname), {}) default_cfg_key = '{0}.{1}'.format(virtualname, attr_name) if not ret_config: # Using the default configuration key if isinstance(cfg, dict): if default_cfg_key in cfg: return cfg[default_cfg_key] else: return c_cfg.get(attr_name) else: return c_cfg.get(attr_name, cfg(default_cfg_key)) # Using ret_config to override the default configuration key ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {}) override_default_cfg_key = '{0}.{1}.{2}'.format( ret_config, virtualname, attr_name, ) override_cfg_default = cfg(override_default_cfg_key) # Look for the configuration item in the override location ret_override_cfg = ret_cfg.get( attr_name, override_cfg_default ) if ret_override_cfg: return ret_override_cfg # if not configuration item found, fall back to the default location. 
return c_cfg.get(attr_name, cfg(default_cfg_key)) def _options_browser(cfg, ret_config, defaults, virtualname, options): """ Iterator generating all duples ```option name -> value``` @see :func:`get_returner_options` """ for option in options: # default place for the option in the config value = _fetch_option(cfg, ret_config, virtualname, options[option]) if value: yield option, value continue # Attribute not found, check for a default value if defaults: if option in defaults: log.info('Using default for %s %s', virtualname, option) yield option, defaults[option] continue # fallback (implicit else for all ifs) continue def _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ): """ Fetches profile specific options if applicable @see :func:`get_returner_options` :return: a options dict """ if (not profile_attr) or (profile_attr not in _options): return {} # Using a profile and it is in _options creds = {} profile = _options[profile_attr] if profile: log.info('Using profile %s', profile) if 'config.option' in __salt__: creds = cfg(profile) else: creds = cfg.get(profile) if not creds: return {} return dict( ( pattr, creds.get('{0}.{1}'.format(virtualname, profile_attrs[pattr])) ) for pattr in profile_attrs )
saltstack/salt
salt/returners/__init__.py
_fetch_option
python
def _fetch_option(cfg, ret_config, virtualname, attr_name): # c_cfg is a dictionary returned from config.option for # any options configured for this returner. if isinstance(cfg, dict): c_cfg = cfg else: c_cfg = cfg('{0}'.format(virtualname), {}) default_cfg_key = '{0}.{1}'.format(virtualname, attr_name) if not ret_config: # Using the default configuration key if isinstance(cfg, dict): if default_cfg_key in cfg: return cfg[default_cfg_key] else: return c_cfg.get(attr_name) else: return c_cfg.get(attr_name, cfg(default_cfg_key)) # Using ret_config to override the default configuration key ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {}) override_default_cfg_key = '{0}.{1}.{2}'.format( ret_config, virtualname, attr_name, ) override_cfg_default = cfg(override_default_cfg_key) # Look for the configuration item in the override location ret_override_cfg = ret_cfg.get( attr_name, override_cfg_default ) if ret_override_cfg: return ret_override_cfg # if not configuration item found, fall back to the default location. return c_cfg.get(attr_name, cfg(default_cfg_key))
Fetch a given option value from the config. @see :func:`get_returner_options`
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/__init__.py#L123-L166
null
# -*- coding: utf-8 -*- ''' Returners Directory :func:`get_returner_options` is a general purpose function that returners may use to fetch their configuration options. ''' from __future__ import absolute_import, print_function, unicode_literals import logging from salt.ext import six log = logging.getLogger(__name__) def get_returner_options(virtualname=None, ret=None, attrs=None, **kwargs): ''' Get the returner options from salt. :param str virtualname: The returner virtualname (as returned by __virtual__() :param ret: result of the module that ran. dict-like object May contain a `ret_config` key pointing to a string If a `ret_config` is specified, config options are read from:: value.virtualname.option If not, config options are read from:: value.virtualname.option :param attrs: options the returner wants to read :param __opts__: Optional dict-like object that contains a fallback config in case the param `__salt__` is not supplied. Defaults to empty dict. :param __salt__: Optional dict-like object that exposes the salt API. Defaults to empty dict. a) if __salt__ contains a 'config.option' configuration options, we infer the returner is being called from a state or module run -> config is a copy of the `config.option` function b) if __salt__ was not available, we infer that the returner is being called from the Salt scheduler, so we look for the configuration options in the param `__opts__` -> cfg is a copy for the __opts__ dictionary :param str profile_attr: Optional. If supplied, an overriding config profile is read from the corresponding key of `__salt__`. :param dict profile_attrs: Optional .. 
fixme:: only keys are read For each key in profile_attr, a value is read in the are used to fetch a value pointed by 'virtualname.%key' in the dict found thanks to the param `profile_attr` ''' ret_config = _fetch_ret_config(ret) attrs = attrs or {} profile_attr = kwargs.get('profile_attr', None) profile_attrs = kwargs.get('profile_attrs', None) defaults = kwargs.get('defaults', None) __salt__ = kwargs.get('__salt__', {}) __opts__ = kwargs.get('__opts__', {}) # select the config source cfg = __salt__.get('config.option', __opts__) # browse the config for relevant options, store them in a dict _options = dict( _options_browser( cfg, ret_config, defaults, virtualname, attrs, ) ) # override some values with relevant profile options _options.update( _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ) ) # override some values with relevant options from # keyword arguments passed via return_kwargs if ret and 'ret_kwargs' in ret: _options.update(ret['ret_kwargs']) return _options def _fetch_ret_config(ret): """ Fetches 'ret_config' if available. 
@see :func:`get_returner_options` """ if not ret: return None if 'ret_config' not in ret: return '' return six.text_type(ret['ret_config']) def _options_browser(cfg, ret_config, defaults, virtualname, options): """ Iterator generating all duples ```option name -> value``` @see :func:`get_returner_options` """ for option in options: # default place for the option in the config value = _fetch_option(cfg, ret_config, virtualname, options[option]) if value: yield option, value continue # Attribute not found, check for a default value if defaults: if option in defaults: log.info('Using default for %s %s', virtualname, option) yield option, defaults[option] continue # fallback (implicit else for all ifs) continue def _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ): """ Fetches profile specific options if applicable @see :func:`get_returner_options` :return: a options dict """ if (not profile_attr) or (profile_attr not in _options): return {} # Using a profile and it is in _options creds = {} profile = _options[profile_attr] if profile: log.info('Using profile %s', profile) if 'config.option' in __salt__: creds = cfg(profile) else: creds = cfg.get(profile) if not creds: return {} return dict( ( pattr, creds.get('{0}.{1}'.format(virtualname, profile_attrs[pattr])) ) for pattr in profile_attrs )
saltstack/salt
salt/returners/__init__.py
_options_browser
python
def _options_browser(cfg, ret_config, defaults, virtualname, options): for option in options: # default place for the option in the config value = _fetch_option(cfg, ret_config, virtualname, options[option]) if value: yield option, value continue # Attribute not found, check for a default value if defaults: if option in defaults: log.info('Using default for %s %s', virtualname, option) yield option, defaults[option] continue # fallback (implicit else for all ifs) continue
Iterator generating all duples ```option name -> value``` @see :func:`get_returner_options`
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/__init__.py#L169-L193
[ "def _fetch_option(cfg, ret_config, virtualname, attr_name):\n \"\"\"\n Fetch a given option value from the config.\n\n @see :func:`get_returner_options`\n \"\"\"\n # c_cfg is a dictionary returned from config.option for\n # any options configured for this returner.\n if isinstance(cfg, dict):\n c_cfg = cfg\n else:\n c_cfg = cfg('{0}'.format(virtualname), {})\n\n default_cfg_key = '{0}.{1}'.format(virtualname, attr_name)\n if not ret_config:\n # Using the default configuration key\n if isinstance(cfg, dict):\n if default_cfg_key in cfg:\n return cfg[default_cfg_key]\n else:\n return c_cfg.get(attr_name)\n else:\n return c_cfg.get(attr_name, cfg(default_cfg_key))\n\n # Using ret_config to override the default configuration key\n ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {})\n\n override_default_cfg_key = '{0}.{1}.{2}'.format(\n ret_config,\n virtualname,\n attr_name,\n )\n override_cfg_default = cfg(override_default_cfg_key)\n\n # Look for the configuration item in the override location\n ret_override_cfg = ret_cfg.get(\n attr_name,\n override_cfg_default\n )\n if ret_override_cfg:\n return ret_override_cfg\n\n # if not configuration item found, fall back to the default location.\n return c_cfg.get(attr_name, cfg(default_cfg_key))\n" ]
# -*- coding: utf-8 -*- ''' Returners Directory :func:`get_returner_options` is a general purpose function that returners may use to fetch their configuration options. ''' from __future__ import absolute_import, print_function, unicode_literals import logging from salt.ext import six log = logging.getLogger(__name__) def get_returner_options(virtualname=None, ret=None, attrs=None, **kwargs): ''' Get the returner options from salt. :param str virtualname: The returner virtualname (as returned by __virtual__() :param ret: result of the module that ran. dict-like object May contain a `ret_config` key pointing to a string If a `ret_config` is specified, config options are read from:: value.virtualname.option If not, config options are read from:: value.virtualname.option :param attrs: options the returner wants to read :param __opts__: Optional dict-like object that contains a fallback config in case the param `__salt__` is not supplied. Defaults to empty dict. :param __salt__: Optional dict-like object that exposes the salt API. Defaults to empty dict. a) if __salt__ contains a 'config.option' configuration options, we infer the returner is being called from a state or module run -> config is a copy of the `config.option` function b) if __salt__ was not available, we infer that the returner is being called from the Salt scheduler, so we look for the configuration options in the param `__opts__` -> cfg is a copy for the __opts__ dictionary :param str profile_attr: Optional. If supplied, an overriding config profile is read from the corresponding key of `__salt__`. :param dict profile_attrs: Optional .. 
fixme:: only keys are read For each key in profile_attr, a value is read in the are used to fetch a value pointed by 'virtualname.%key' in the dict found thanks to the param `profile_attr` ''' ret_config = _fetch_ret_config(ret) attrs = attrs or {} profile_attr = kwargs.get('profile_attr', None) profile_attrs = kwargs.get('profile_attrs', None) defaults = kwargs.get('defaults', None) __salt__ = kwargs.get('__salt__', {}) __opts__ = kwargs.get('__opts__', {}) # select the config source cfg = __salt__.get('config.option', __opts__) # browse the config for relevant options, store them in a dict _options = dict( _options_browser( cfg, ret_config, defaults, virtualname, attrs, ) ) # override some values with relevant profile options _options.update( _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ) ) # override some values with relevant options from # keyword arguments passed via return_kwargs if ret and 'ret_kwargs' in ret: _options.update(ret['ret_kwargs']) return _options def _fetch_ret_config(ret): """ Fetches 'ret_config' if available. @see :func:`get_returner_options` """ if not ret: return None if 'ret_config' not in ret: return '' return six.text_type(ret['ret_config']) def _fetch_option(cfg, ret_config, virtualname, attr_name): """ Fetch a given option value from the config. @see :func:`get_returner_options` """ # c_cfg is a dictionary returned from config.option for # any options configured for this returner. 
if isinstance(cfg, dict): c_cfg = cfg else: c_cfg = cfg('{0}'.format(virtualname), {}) default_cfg_key = '{0}.{1}'.format(virtualname, attr_name) if not ret_config: # Using the default configuration key if isinstance(cfg, dict): if default_cfg_key in cfg: return cfg[default_cfg_key] else: return c_cfg.get(attr_name) else: return c_cfg.get(attr_name, cfg(default_cfg_key)) # Using ret_config to override the default configuration key ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {}) override_default_cfg_key = '{0}.{1}.{2}'.format( ret_config, virtualname, attr_name, ) override_cfg_default = cfg(override_default_cfg_key) # Look for the configuration item in the override location ret_override_cfg = ret_cfg.get( attr_name, override_cfg_default ) if ret_override_cfg: return ret_override_cfg # if not configuration item found, fall back to the default location. return c_cfg.get(attr_name, cfg(default_cfg_key)) def _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ): """ Fetches profile specific options if applicable @see :func:`get_returner_options` :return: a options dict """ if (not profile_attr) or (profile_attr not in _options): return {} # Using a profile and it is in _options creds = {} profile = _options[profile_attr] if profile: log.info('Using profile %s', profile) if 'config.option' in __salt__: creds = cfg(profile) else: creds = cfg.get(profile) if not creds: return {} return dict( ( pattr, creds.get('{0}.{1}'.format(virtualname, profile_attrs[pattr])) ) for pattr in profile_attrs )
saltstack/salt
salt/returners/__init__.py
_fetch_profile_opts
python
def _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ): if (not profile_attr) or (profile_attr not in _options): return {} # Using a profile and it is in _options creds = {} profile = _options[profile_attr] if profile: log.info('Using profile %s', profile) if 'config.option' in __salt__: creds = cfg(profile) else: creds = cfg.get(profile) if not creds: return {} return dict( ( pattr, creds.get('{0}.{1}'.format(virtualname, profile_attrs[pattr])) ) for pattr in profile_attrs )
Fetches profile specific options if applicable @see :func:`get_returner_options` :return: a options dict
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/__init__.py#L196-L235
null
# -*- coding: utf-8 -*- ''' Returners Directory :func:`get_returner_options` is a general purpose function that returners may use to fetch their configuration options. ''' from __future__ import absolute_import, print_function, unicode_literals import logging from salt.ext import six log = logging.getLogger(__name__) def get_returner_options(virtualname=None, ret=None, attrs=None, **kwargs): ''' Get the returner options from salt. :param str virtualname: The returner virtualname (as returned by __virtual__() :param ret: result of the module that ran. dict-like object May contain a `ret_config` key pointing to a string If a `ret_config` is specified, config options are read from:: value.virtualname.option If not, config options are read from:: value.virtualname.option :param attrs: options the returner wants to read :param __opts__: Optional dict-like object that contains a fallback config in case the param `__salt__` is not supplied. Defaults to empty dict. :param __salt__: Optional dict-like object that exposes the salt API. Defaults to empty dict. a) if __salt__ contains a 'config.option' configuration options, we infer the returner is being called from a state or module run -> config is a copy of the `config.option` function b) if __salt__ was not available, we infer that the returner is being called from the Salt scheduler, so we look for the configuration options in the param `__opts__` -> cfg is a copy for the __opts__ dictionary :param str profile_attr: Optional. If supplied, an overriding config profile is read from the corresponding key of `__salt__`. :param dict profile_attrs: Optional .. 
fixme:: only keys are read For each key in profile_attr, a value is read in the are used to fetch a value pointed by 'virtualname.%key' in the dict found thanks to the param `profile_attr` ''' ret_config = _fetch_ret_config(ret) attrs = attrs or {} profile_attr = kwargs.get('profile_attr', None) profile_attrs = kwargs.get('profile_attrs', None) defaults = kwargs.get('defaults', None) __salt__ = kwargs.get('__salt__', {}) __opts__ = kwargs.get('__opts__', {}) # select the config source cfg = __salt__.get('config.option', __opts__) # browse the config for relevant options, store them in a dict _options = dict( _options_browser( cfg, ret_config, defaults, virtualname, attrs, ) ) # override some values with relevant profile options _options.update( _fetch_profile_opts( cfg, virtualname, __salt__, _options, profile_attr, profile_attrs ) ) # override some values with relevant options from # keyword arguments passed via return_kwargs if ret and 'ret_kwargs' in ret: _options.update(ret['ret_kwargs']) return _options def _fetch_ret_config(ret): """ Fetches 'ret_config' if available. @see :func:`get_returner_options` """ if not ret: return None if 'ret_config' not in ret: return '' return six.text_type(ret['ret_config']) def _fetch_option(cfg, ret_config, virtualname, attr_name): """ Fetch a given option value from the config. @see :func:`get_returner_options` """ # c_cfg is a dictionary returned from config.option for # any options configured for this returner. 
if isinstance(cfg, dict): c_cfg = cfg else: c_cfg = cfg('{0}'.format(virtualname), {}) default_cfg_key = '{0}.{1}'.format(virtualname, attr_name) if not ret_config: # Using the default configuration key if isinstance(cfg, dict): if default_cfg_key in cfg: return cfg[default_cfg_key] else: return c_cfg.get(attr_name) else: return c_cfg.get(attr_name, cfg(default_cfg_key)) # Using ret_config to override the default configuration key ret_cfg = cfg('{0}.{1}'.format(ret_config, virtualname), {}) override_default_cfg_key = '{0}.{1}.{2}'.format( ret_config, virtualname, attr_name, ) override_cfg_default = cfg(override_default_cfg_key) # Look for the configuration item in the override location ret_override_cfg = ret_cfg.get( attr_name, override_cfg_default ) if ret_override_cfg: return ret_override_cfg # if not configuration item found, fall back to the default location. return c_cfg.get(attr_name, cfg(default_cfg_key)) def _options_browser(cfg, ret_config, defaults, virtualname, options): """ Iterator generating all duples ```option name -> value``` @see :func:`get_returner_options` """ for option in options: # default place for the option in the config value = _fetch_option(cfg, ret_config, virtualname, options[option]) if value: yield option, value continue # Attribute not found, check for a default value if defaults: if option in defaults: log.info('Using default for %s %s', virtualname, option) yield option, defaults[option] continue # fallback (implicit else for all ifs) continue
saltstack/salt
salt/states/rabbitmq_plugin.py
enabled
python
def enabled(name, runas=None): ''' Ensure the RabbitMQ plugin is enabled. name The name of the plugin runas The user to run the rabbitmq-plugin command as ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} try: plugin_enabled = __salt__['rabbitmq.plugin_is_enabled'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret if plugin_enabled: ret['comment'] = 'Plugin \'{0}\' is already enabled.'.format(name) return ret if not __opts__['test']: try: __salt__['rabbitmq.enable_plugin'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret ret['changes'].update({'old': '', 'new': name}) if __opts__['test'] and ret['changes']: ret['result'] = None ret['comment'] = 'Plugin \'{0}\' is set to be enabled.'.format(name) return ret ret['comment'] = 'Plugin \'{0}\' was enabled.'.format(name) return ret
Ensure the RabbitMQ plugin is enabled. name The name of the plugin runas The user to run the rabbitmq-plugin command as
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rabbitmq_plugin.py#L35-L73
null
# -*- coding: utf-8 -*- ''' Manage RabbitMQ Plugins ======================= .. versionadded:: 2014.1.0 Example: .. code-block:: yaml some_plugin: rabbitmq_plugin.enabled: [] ''' # Import Python Libs from __future__ import absolute_import, unicode_literals, print_function import logging # Import Salt Libs from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if RabbitMQ is installed. ''' if __salt__['cmd.has_exec']('rabbitmqctl'): return True return False def disabled(name, runas=None): ''' Ensure the RabbitMQ plugin is disabled. name The name of the plugin runas The user to run the rabbitmq-plugin command as ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} try: plugin_enabled = __salt__['rabbitmq.plugin_is_enabled'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret if not plugin_enabled: ret['comment'] = 'Plugin \'{0}\' is already disabled.'.format(name) return ret if not __opts__['test']: try: __salt__['rabbitmq.disable_plugin'](name, runas=runas) except CommandExecutionError as err: ret['result'] = False ret['comment'] = 'Error: {0}'.format(err) return ret ret['changes'].update({'old': name, 'new': ''}) if __opts__['test'] and ret['changes']: ret['result'] = None ret['comment'] = 'Plugin \'{0}\' is set to be disabled.'.format(name) return ret ret['comment'] = 'Plugin \'{0}\' was disabled.'.format(name) return ret
saltstack/salt
salt/states/git.py
_revs_equal
python
def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L47-L63
null
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
                    if git_ver >= _LooseVersion('1.8.0'):
                        upstream_action = 'Tracking branch was unset'
                        branch_opts = ['--unset-upstream']
                    else:
                        branch_opts = None
                elif desired_upstream and upstream != desired_upstream:
                    upstream_action = (
                        'Tracking branch was updated to {0}'.format(
                            desired_upstream
                        )
                    )
                    branch_opts = _get_branch_opts(
                        branch,
                        local_branch,
                        __salt__['git.list_branches'](
                            target,
                            user=user,
                            password=password,
                            output_encoding=output_encoding),
                        desired_upstream,
                        git_ver)
                else:
                    branch_opts = None

                if branch_opts is not None:
                    __salt__['git.branch'](
                        target,
                        opts=branch_opts,
                        user=user,
                        password=password,
                        output_encoding=output_encoding)
                    comments.append(upstream_action)

            # Initialize/update submodules recursively after the fresh clone
            if submodules and remote_rev:
                try:
                    __salt__['git.submodule'](
                        target,
                        'update',
                        opts=['--init', '--recursive'],
                        user=user,
                        password=password,
                        identity=identity,
                        output_encoding=output_encoding)
                except CommandExecutionError as exc:
                    return _failed_submodule_update(ret, exc, comments)

            try:
                new_rev = __salt__['git.revision'](
                    cwd=target,
                    user=user,
                    password=password,
                    ignore_retcode=True,
                    output_encoding=output_encoding)
            except CommandExecutionError:
                new_rev = None

        # Catch-all so that an unexpected failure is reported as a state
        # failure (with the comments gathered so far) instead of a traceback
        except Exception as exc:
            log.error(
                'Unexpected exception in git.latest state',
                exc_info=True
            )
            if isinstance(exc, CommandExecutionError):
                msg = _strip_exc(exc)
            else:
                msg = six.text_type(exc)
            return _fail(ret, msg, comments)

        msg = _format_comments(comments)
        log.info(msg)
        ret['comment'] = msg
        if new_rev is not None:
            ret['changes']['revision'] = {'old': None, 'new': new_rev}
    return ret


def present(name,
            force=False,
            bare=True,
            template=None,
            separate_git_dir=None,
            shared=None,
            user=None,
            password=None,
            output_encoding=None):
    '''
    Ensure that a repository exists in the given directory

    .. warning::
        If the minion has Git 2.5 or later installed, ``name`` points to a
        worktree_, and ``force`` is set to ``True``, then the worktree will be
        deleted. This has been corrected in Salt 2015.8.0.

    name
        Path to the directory

        .. versionchanged:: 2015.8.0
            This path must now be absolute

    force : False
        If ``True``, and if ``name`` points to an existing directory which
        does not contain a git repository, then the contents of that directory
        will be recursively removed and a new repository will be initialized
        in its place.

    bare : True
        If ``True``, and a repository must be initialized, then the repository
        will be a bare repository.

        .. note::
            This differs from the default behavior of :py:func:`git.init
            <salt.modules.git.init>`, make sure to set this value to ``False``
            if a bare repo is not desired.

    template
        If a new repository is initialized, this argument will specify an
        alternate template directory.

        .. versionadded:: 2015.8.0

    separate_git_dir
        If a new repository is initialized, this argument will specify an
        alternate ``$GIT_DIR``

        .. versionadded:: 2015.8.0

    shared
        Set sharing permissions on git repo. See `git-init(1)`_ for more
        details.

        .. versionadded:: 2015.5.0

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

        .. versionadded:: 0.17.0

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-init(1)`: http://git-scm.com/docs/git-init
    .. _`worktree`: http://git-scm.com/docs/git-worktree
    '''
    # Standard state return skeleton, filled in below
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    # If the named directory is a git repo return True
    if os.path.isdir(name):
        # A bare repo is detected by a HEAD file directly under the target; a
        # non-bare repo by a .git dir (or by being a linked git worktree).
        if bare and os.path.isfile(os.path.join(name, 'HEAD')):
            return ret
        elif not bare and \
                (os.path.isdir(os.path.join(name, '.git')) or
                 __salt__['git.is_worktree'](name,
                                             user=user,
                                             password=password,
                                             output_encoding=output_encoding)):
            return ret
        # Directory exists and is not a git repo, if force is set destroy the
        # directory and recreate, otherwise throw an error
        elif force:
            # Directory exists, and the ``force`` option is enabled, so we need
            # to clear out its contents to proceed.
            if __opts__['test']:
                ret['changes']['new'] = name
                ret['changes']['forced init'] = True
                return _neutral_test(
                    ret,
                    'Target directory {0} exists. Since force=True, the '
                    'contents of {0} would be deleted, and a {1}repository '
                    'would be initialized in its place.'
                    .format(name, 'bare ' if bare else '')
                )
            log.debug(
                'Removing contents of %s to initialize %srepository in its '
                'place (force=True set in git.present state)',
                name, 'bare ' if bare else ''
            )
            try:
                # A symlinked target is unlinked rather than recursively
                # removed, so the link target's contents are left alone.
                if os.path.islink(name):
                    os.unlink(name)
                else:
                    salt.utils.files.rm_rf(name)
            except OSError as exc:
                return _fail(
                    ret,
                    'Unable to remove {0}: {1}'.format(name, exc)
                )
            else:
                ret['changes']['forced init'] = True
        elif os.listdir(name):
            return _fail(
                ret,
                'Target \'{0}\' exists, is non-empty, and is not a git '
                'repository. Set the \'force\' option to True to remove '
                'this directory\'s contents and proceed with initializing a '
                'repository'.format(name)
            )

    # Run test is set
    if __opts__['test']:
        ret['changes']['new'] = name
        return _neutral_test(
            ret,
            'New {0}repository would be created'.format(
                'bare ' if bare else ''
            )
        )

    __salt__['git.init'](cwd=name,
                         bare=bare,
                         template=template,
                         separate_git_dir=separate_git_dir,
                         shared=shared,
                         user=user,
                         password=password,
                         output_encoding=output_encoding)

    actions = [
        'Initialized {0}repository in {1}'.format(
            'bare ' if bare else '',
            name
        )
    ]
    if template:
        actions.append('Template directory set to {0}'.format(template))
    if separate_git_dir:
        actions.append('Gitdir set to {0}'.format(separate_git_dir))
    # Join the action summaries into a single comment; a trailing period is
    # only added when more than one action was performed.
    message = '. '.join(actions)
    if len(actions) > 1:
        message += '.'
    log.info(message)
    ret['changes']['new'] = name
    ret['comment'] = message
    return ret


def detached(name,
             rev,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=None,
             unless=None,
             output_encoding=None,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``rev``.

    name
        Address of the remote repository.

    rev
        The branch, tag, or commit ID to checkout after clone.
        If a branch or tag is specified it will be resolved to a commit ID
        and checked out.

    target
        Name of the target directory where repository is about to be cloned.

    remote : origin
        Git remote to use. If this state needs to clone the repo, it will clone
        it using this value as the initial remote name. If the repository
        already exists, and a remote by this name is not present, one will be
        added.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. 
This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_checkout : False When checking out the revision ID, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. fetch_remote : True If ``False`` a fetch will not be performed and only local refs will be reachable. hard_reset : False If ``True`` a hard reset will be performed before the checkout and any uncommitted modifications to the working directory will be discarded. Untracked files will remain in place. .. note:: Changes resulting from a hard reset will not trigger requisites. submodules : False Update submodules identity A path on the minion (or a SaltStack fileserver URL, e.g. ``salt://path/to/identity_file``) to a private key to use for SSH authentication. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. 
versionadded:: 2018.3.1 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if not target: return _fail( ret, '\'{0}\' is not a valid value for the \'target\' argument'.format(rev) ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'Target \'{0}\' is not an absolute path'.format(target) ) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'Identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path) except IOError as exc: log.error('Failed to cache %s: %s', ident_path, exc) return _fail( ret, 'Identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'Identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = 
salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url) # Check if onlyif or unless conditions match run_check_cmd_kwargs = {'runas': user} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret # Determine if supplied ref is a hash remote_rev_type = 'ref' if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): rev = rev.lower() remote_rev_type = 'hash' comments = [] hash_exists_locally = False local_commit_id = None gitdir = os.path.join(target, '.git') if os.path.isdir(gitdir) \ or __salt__['git.is_worktree'](target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree local_commit_id = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding)[0] if remote_rev_type is 'hash': try: __salt__['git.describe'](target, rev, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: hash_exists_locally = False else: # The rev is a hash and it exists locally so skip to checkout hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) if remote in remotes and name in remotes[remote]['fetch']: pass else: # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
current_fetch_url = None if remote in remotes: current_fetch_url = remotes[remote]['fetch'] if __opts__['test']: return _neutral_test( ret, 'Remote {0} would be set to {1}'.format( remote, name ) ) __salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) comments.append( 'Remote {0} updated from \'{1}\' to \'{2}\''.format( remote, current_fetch_url, name ) ) else: # Clone repository if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. if __opts__['test']: return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.detached state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True elif target_contents: # Clone is required, but target dir exists and is non-empty. We # can't proceed. return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--no-checkout'] if remote != 'origin': clone_opts.extend(['--origin', remote]) __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) comments.append('{0} cloned to {1}'.format(name, target)) except Exception as exc: log.error( 'Unexpected exception in git.detached state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) # Repository exists and is ready for fetch/checkout refspecs = [ 'refs/heads/*:refs/remotes/{0}/*'.format(remote), '+refs/tags/*:refs/tags/*' ] if hash_exists_locally or fetch_remote is False: pass else: # Fetch refs from remote if __opts__['test']: return _neutral_test( ret, 'Repository remote {0} would be fetched'.format(remote) ) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=True, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Fetch failed' msg += ':\n\n' + six.text_type(exc) return _fail(ret, msg, comments) else: if fetch_changes: comments.append( 'Remote {0} was fetched, resulting in updated ' 'refs'.format(remote) ) # get refs and checkout checkout_commit_id = '' if remote_rev_type is 'hash': if __salt__['git.describe']( target, rev, user=user, password=password, output_encoding=output_encoding): checkout_commit_id = rev else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) else: try: all_remote_refs = 
__salt__['git.remote_refs']( target, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, output_encoding=output_encoding) if 'refs/remotes/'+remote+'/'+rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev] elif 'refs/tags/' + rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/tags/' + rev] else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) except CommandExecutionError as exc: return _fail( ret, 'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc)) ) if hard_reset: if __opts__['test']: return _neutral_test( ret, 'Hard reset to HEAD would be performed on {0}'.format(target) ) __salt__['git.reset']( target, opts=['--hard', 'HEAD'], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to HEAD before checking out revision' ) # TODO: implement clean function for git module and add clean flag if checkout_commit_id == local_commit_id: new_rev = None else: if __opts__['test']: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id} return _neutral_test( ret, 'Commit ID {0} would be checked out at {1}'.format( checkout_commit_id, target ) ) __salt__['git.checkout'](target, checkout_commit_id, force=force_checkout, user=user, password=password, output_encoding=output_encoding) comments.append( 'Commit ID {0} was checked out at {1}'.format( checkout_commit_id, target ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None if submodules: __salt__['git.submodule'](target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) comments.append( 'Submodules were updated' ) if new_rev is not None: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': 
new_rev} else: comments.append("Already checked out at correct revision") msg = _format_comments(comments) log.info(msg) ret['comment'] = msg return ret def cloned(name, target=None, branch=None, user=None, password=None, identity=None, https_user=None, https_pass=None, output_encoding=None): ''' .. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. If your use case requires that you keep the clone up to date with the remote repository, then consider using :py:func:`git.latest <salt.states.git.latest>`. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. identity Path to a private key to use for ssh URLs. Works the same way as in :py:func:`git.latest <salt.states.git.latest>`, see that state's documentation for more information. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. 
''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if target is None: ret['comment'] = '\'target\' argument is required' return ret elif not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): ret['comment'] = '\'target\' path must be absolute' return ret if branch is not None: if not isinstance(branch, six.string_types): branch = six.text_type(branch) if not branch: ret['comment'] = 'Invalid \'branch\' argument' return ret if not os.path.exists(target): need_clone = True else: try: __salt__['git.status'](target, user=user, password=password, output_encoding=output_encoding) except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: need_clone = False comments = [] def _clone_changes(ret): ret['changes']['new'] = name + ' => ' + target def _branch_changes(ret, old, new): ret['changes']['branch'] = {'old': old, 'new': new} if need_clone: if __opts__['test']: _clone_changes(ret) comment = '{0} would be cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) return _neutral_test(ret, comment) clone_opts = ['--branch', branch] if branch is not None else None try: __salt__['git.clone'](target, name, opts=clone_opts, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) comments.append( '{0} cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) ) _clone_changes(ret) ret['comment'] = _format_comments(comments) ret['result'] = True return ret else: if branch is None: return _already_cloned(ret, target, branch, comments) else: current_branch = __salt__['git.current_branch']( target, user=user, password=password, output_encoding=output_encoding) if current_branch == branch: return 
_already_cloned(ret, target, branch, comments) else: if __opts__['test']: _branch_changes(ret, current_branch, branch) return _neutral_test( ret, 'Branch would be changed to \'{0}\''.format(branch)) try: __salt__['git.rev_parse']( target, rev=branch, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local head does not exist, so we need to check out a new # branch at the remote rev checkout_rev = '/'.join(('origin', branch)) checkout_opts = ['-b', branch] else: # Local head exists, so we just need to check it out checkout_rev = branch checkout_opts = None try: __salt__['git.checkout']( target, rev=checkout_rev, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc) return _fail(ret, msg, comments) else: comments.append('Branch changed to \'{0}\''.format(branch)) _branch_changes(ret, current_branch, branch) ret['comment'] = _format_comments(comments) ret['result'] = True return ret def config_unset(name, value_regex=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): r''' .. versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. 
all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. 
pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. 
versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. 
This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, 
value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} 
return True
saltstack/salt
salt/states/git.py
_get_branch_opts
python
def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret
DRY helper that builds the list of options passed to ``git.branch`` when setting the upstream tracking branch
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L87-L110
null
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. 
ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' .format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. 
Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. 
It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. 
This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. 
Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. 
identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. 
versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not 
target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. 
These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in 
six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) if submodules and remote_rev: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) msg = _format_comments(comments) log.info(msg) ret['comment'] = msg if new_rev is not None: ret['changes']['revision'] = {'old': None, 'new': new_rev} return ret def present(name, force=False, bare=True, template=None, separate_git_dir=None, shared=None, user=None, password=None, output_encoding=None): ''' Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. 
versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. 
_`worktree`: http://git-scm.com/docs/git-worktree
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}

    # If the named directory is a git repo return True
    if os.path.isdir(name):
        if bare and os.path.isfile(os.path.join(name, 'HEAD')):
            # A bare repository keeps HEAD directly in its top-level dir
            return ret
        elif not bare and \
                (os.path.isdir(os.path.join(name, '.git')) or
                 __salt__['git.is_worktree'](name,
                                             user=user,
                                             password=password,
                                             output_encoding=output_encoding)):
            # Non-bare repos have a .git dir; a git worktree also counts
            return ret
        # Directory exists and is not a git repo, if force is set destroy the
        # directory and recreate, otherwise throw an error
        elif force:
            # Directory exists, and the ``force`` option is enabled, so we
            # need to clear out its contents to proceed.
            if __opts__['test']:
                ret['changes']['new'] = name
                ret['changes']['forced init'] = True
                return _neutral_test(
                    ret,
                    'Target directory {0} exists. Since force=True, the '
                    'contents of {0} would be deleted, and a {1}repository '
                    'would be initialized in its place.'
                    .format(name, 'bare ' if bare else '')
                )
            log.debug(
                'Removing contents of %s to initialize %srepository in its '
                'place (force=True set in git.present state)',
                name, 'bare ' if bare else ''
            )
            try:
                if os.path.islink(name):
                    # Remove only the symlink itself, not its referent
                    os.unlink(name)
                else:
                    salt.utils.files.rm_rf(name)
            except OSError as exc:
                return _fail(
                    ret,
                    'Unable to remove {0}: {1}'.format(name, exc)
                )
            else:
                ret['changes']['forced init'] = True
        elif os.listdir(name):
            # Non-empty, not a repo, and force not set: refuse to clobber
            return _fail(
                ret,
                'Target \'{0}\' exists, is non-empty, and is not a git '
                'repository. Set the \'force\' option to True to remove '
                'this directory\'s contents and proceed with initializing a '
                'repository'.format(name)
            )

    # Run test is set
    if __opts__['test']:
        ret['changes']['new'] = name
        return _neutral_test(
            ret,
            'New {0}repository would be created'.format(
                'bare ' if bare else ''
            )
        )

    __salt__['git.init'](cwd=name,
                         bare=bare,
                         template=template,
                         separate_git_dir=separate_git_dir,
                         shared=shared,
                         user=user,
                         password=password,
                         output_encoding=output_encoding)

    actions = [
        'Initialized {0}repository in {1}'.format(
            'bare ' if bare else '',
            name
        )
    ]
    if template:
        actions.append('Template directory set to {0}'.format(template))
    if separate_git_dir:
        actions.append('Gitdir set to {0}'.format(separate_git_dir))
    message = '. '.join(actions)
    if len(actions) > 1:
        # Terminate the multi-sentence comment with a final period
        message += '.'
    log.info(message)
    ret['changes']['new'] = name
    ret['comment'] = message
    return ret


def detached(name,
             rev,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=None,
             unless=None,
             output_encoding=None,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``rev``.

    name
        Address of the remote repository.

    rev
        The branch, tag, or commit ID to checkout after clone.
        If a branch or tag is specified it will be resolved to a commit ID
        and checked out.

    target
        Name of the target directory where repository is about to be cloned.

    remote : origin
        Git remote to use. If this state needs to clone the repo, it will
        clone it using this value as the initial remote name. If the
        repository already exists, and a remote by this name is not present,
        one will be added.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``.
This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_checkout : False When checking out the revision ID, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. fetch_remote : True If ``False`` a fetch will not be performed and only local refs will be reachable. hard_reset : False If ``True`` a hard reset will be performed before the checkout and any uncommitted modifications to the working directory will be discarded. Untracked files will remain in place. .. note:: Changes resulting from a hard reset will not trigger requisites. submodules : False Update submodules identity A path on the minion (or a SaltStack fileserver URL, e.g. ``salt://path/to/identity_file``) to a private key to use for SSH authentication. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. 
versionadded:: 2018.3.1 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if not target: return _fail( ret, '\'{0}\' is not a valid value for the \'target\' argument'.format(rev) ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'Target \'{0}\' is not an absolute path'.format(target) ) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'Identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path) except IOError as exc: log.error('Failed to cache %s: %s', ident_path, exc) return _fail( ret, 'Identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'Identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = 
salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url) # Check if onlyif or unless conditions match run_check_cmd_kwargs = {'runas': user} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret # Determine if supplied ref is a hash remote_rev_type = 'ref' if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): rev = rev.lower() remote_rev_type = 'hash' comments = [] hash_exists_locally = False local_commit_id = None gitdir = os.path.join(target, '.git') if os.path.isdir(gitdir) \ or __salt__['git.is_worktree'](target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree local_commit_id = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding)[0] if remote_rev_type is 'hash': try: __salt__['git.describe'](target, rev, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: hash_exists_locally = False else: # The rev is a hash and it exists locally so skip to checkout hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) if remote in remotes and name in remotes[remote]['fetch']: pass else: # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
current_fetch_url = None if remote in remotes: current_fetch_url = remotes[remote]['fetch'] if __opts__['test']: return _neutral_test( ret, 'Remote {0} would be set to {1}'.format( remote, name ) ) __salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) comments.append( 'Remote {0} updated from \'{1}\' to \'{2}\''.format( remote, current_fetch_url, name ) ) else: # Clone repository if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. if __opts__['test']: return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.detached state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True elif target_contents: # Clone is required, but target dir exists and is non-empty. We # can't proceed. return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--no-checkout'] if remote != 'origin': clone_opts.extend(['--origin', remote]) __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) comments.append('{0} cloned to {1}'.format(name, target)) except Exception as exc: log.error( 'Unexpected exception in git.detached state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) # Repository exists and is ready for fetch/checkout refspecs = [ 'refs/heads/*:refs/remotes/{0}/*'.format(remote), '+refs/tags/*:refs/tags/*' ] if hash_exists_locally or fetch_remote is False: pass else: # Fetch refs from remote if __opts__['test']: return _neutral_test( ret, 'Repository remote {0} would be fetched'.format(remote) ) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=True, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Fetch failed' msg += ':\n\n' + six.text_type(exc) return _fail(ret, msg, comments) else: if fetch_changes: comments.append( 'Remote {0} was fetched, resulting in updated ' 'refs'.format(remote) ) # get refs and checkout checkout_commit_id = '' if remote_rev_type is 'hash': if __salt__['git.describe']( target, rev, user=user, password=password, output_encoding=output_encoding): checkout_commit_id = rev else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) else: try: all_remote_refs = 
__salt__['git.remote_refs'](
                target,
                user=user,
                password=password,
                identity=identity,
                https_user=https_user,
                https_pass=https_pass,
                ignore_retcode=False,
                output_encoding=output_encoding)

            # Resolve rev to a commit ID: prefer a remote branch ref, fall
            # back to a tag ref, otherwise fail
            if 'refs/remotes/'+remote+'/'+rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev]
            elif 'refs/tags/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/tags/' + rev]
            else:
                return _fail(
                    ret,
                    'Revision \'{0}\' does not exist'.format(rev)
                )
        except CommandExecutionError as exc:
            return _fail(
                ret,
                'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc))
            )

    if hard_reset:
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Hard reset to HEAD would be performed on {0}'.format(target)
            )
        __salt__['git.reset'](
            target,
            opts=['--hard', 'HEAD'],
            user=user,
            password=password,
            output_encoding=output_encoding)
        comments.append(
            'Repository was reset to HEAD before checking out revision'
        )

    # TODO: implement clean function for git module and add clean flag

    if checkout_commit_id == local_commit_id:
        # Already at the desired commit; skip the checkout
        new_rev = None
    else:
        if __opts__['test']:
            ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id}
            return _neutral_test(
                ret,
                'Commit ID {0} would be checked out at {1}'.format(
                    checkout_commit_id,
                    target
                )
            )
        __salt__['git.checkout'](target,
                                 checkout_commit_id,
                                 force=force_checkout,
                                 user=user,
                                 password=password,
                                 output_encoding=output_encoding)
        comments.append(
            'Commit ID {0} was checked out at {1}'.format(
                checkout_commit_id,
                target
            )
        )

        try:
            new_rev = __salt__['git.revision'](
                cwd=target,
                user=user,
                password=password,
                ignore_retcode=True,
                output_encoding=output_encoding)
        except CommandExecutionError:
            new_rev = None

    if submodules:
        __salt__['git.submodule'](target,
                                  'update',
                                  opts=['--init', '--recursive'],
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  output_encoding=output_encoding)
        comments.append(
            'Submodules were updated'
        )

    if new_rev is not None:
        ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
    else:
        comments.append("Already checked out at correct revision")

    msg = _format_comments(comments)
    log.info(msg)
    ret['comment'] = msg

    return ret


def cloned(name,
           target=None,
           branch=None,
           user=None,
           password=None,
           identity=None,
           https_user=None,
           https_pass=None,
           output_encoding=None):
    '''
    .. versionadded:: 2018.3.3,2019.2.0

    Ensure that a repository has been cloned to the specified target directory.
    If not, clone that repository. No fetches will be performed once cloned.

    name
        Address of the remote repository

    target
        Name of the target directory where repository should be cloned

    branch
        Remote branch to check out. If unspecified, the default branch (i.e.
        the one to the remote HEAD points) will be checked out.

        .. note::
            The local branch name will match the remote branch name. If the
            branch name is changed, then that branch will be checked out
            locally, but keep in mind that remote repository will not be
            fetched. If your use case requires that you keep the clone up to
            date with the remote repository, then consider using
            :py:func:`git.latest <salt.states.git.latest>`.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

    identity
        Path to a private key to use for ssh URLs. Works the same way as in
        :py:func:`git.latest <salt.states.git.latest>`, see that state's
        documentation for more information.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.
''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if target is None: ret['comment'] = '\'target\' argument is required' return ret elif not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): ret['comment'] = '\'target\' path must be absolute' return ret if branch is not None: if not isinstance(branch, six.string_types): branch = six.text_type(branch) if not branch: ret['comment'] = 'Invalid \'branch\' argument' return ret if not os.path.exists(target): need_clone = True else: try: __salt__['git.status'](target, user=user, password=password, output_encoding=output_encoding) except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: need_clone = False comments = [] def _clone_changes(ret): ret['changes']['new'] = name + ' => ' + target def _branch_changes(ret, old, new): ret['changes']['branch'] = {'old': old, 'new': new} if need_clone: if __opts__['test']: _clone_changes(ret) comment = '{0} would be cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) return _neutral_test(ret, comment) clone_opts = ['--branch', branch] if branch is not None else None try: __salt__['git.clone'](target, name, opts=clone_opts, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) comments.append( '{0} cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) ) _clone_changes(ret) ret['comment'] = _format_comments(comments) ret['result'] = True return ret else: if branch is None: return _already_cloned(ret, target, branch, comments) else: current_branch = __salt__['git.current_branch']( target, user=user, password=password, output_encoding=output_encoding) if current_branch == branch: return 
_already_cloned(ret, target, branch, comments) else: if __opts__['test']: _branch_changes(ret, current_branch, branch) return _neutral_test( ret, 'Branch would be changed to \'{0}\''.format(branch)) try: __salt__['git.rev_parse']( target, rev=branch, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local head does not exist, so we need to check out a new # branch at the remote rev checkout_rev = '/'.join(('origin', branch)) checkout_opts = ['-b', branch] else: # Local head exists, so we just need to check it out checkout_rev = branch checkout_opts = None try: __salt__['git.checkout']( target, rev=checkout_rev, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc) return _fail(ret, msg, comments) else: comments.append('Branch changed to \'{0}\''.format(branch)) _branch_changes(ret, current_branch, branch) ret['comment'] = _format_comments(comments) ret['result'] = True return ret def config_unset(name, value_regex=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): r''' .. versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. 
all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. 
pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. 
versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. 
This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, 
value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} 
return True
saltstack/salt
salt/states/git.py
_get_local_rev_and_branch
python
def _get_local_rev_and_branch(target, user, password, output_encoding=None):
    '''
    Return the local revision for before/after comparisons

    Queries the checkout at ``target`` for its current SHA1 and its
    checked-out branch, running the git commands as ``user``. Either
    element of the returned ``(rev, branch)`` tuple is ``None`` when the
    corresponding query fails (e.g. a freshly-initialized repo with no
    commits, or a detached HEAD with no branch).
    '''
    def _query(func):
        # Shared invocation for both git lookups; retcode is ignored so a
        # failing query surfaces as CommandExecutionError, not a state error.
        return __salt__[func](
            target,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding)

    log.info('Checking local revision for %s', target)
    try:
        local_rev = _query('git.revision')
    except CommandExecutionError:
        log.info('No local revision for %s', target)
        local_rev = None

    log.info('Checking local branch for %s', target)
    try:
        local_branch = _query('git.current_branch')
    except CommandExecutionError:
        log.info('No local branch for %s', target)
        local_branch = None

    return local_rev, local_branch
Return the local revision for before/after comparisons
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L113-L141
null
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. 
ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' .format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. 
Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. 
It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. 
This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. 
Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. 
identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. 
versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not 
target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. 
These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in 
six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
def present(name,
            force=False,
            bare=True,
            template=None,
            separate_git_dir=None,
            shared=None,
            user=None,
            password=None,
            output_encoding=None):
    '''
    Ensure that a repository exists in the given directory

    .. warning::
        If the minion has Git 2.5 or later installed, ``name`` points to a
        worktree_, and ``force`` is set to ``True``, then the worktree will be
        deleted. This has been corrected in Salt 2015.8.0.

    name
        Path to the directory

        .. versionchanged:: 2015.8.0
            This path must now be absolute

    force : False
        If ``True``, and if ``name`` points to an existing directory which
        does not contain a git repository, then the contents of that directory
        will be recursively removed and a new repository will be initialized
        in its place.

    bare : True
        If ``True``, and a repository must be initialized, then the repository
        will be a bare repository.

        .. note::
            This differs from the default behavior of :py:func:`git.init
            <salt.modules.git.init>`, make sure to set this value to ``False``
            if a bare repo is not desired.

    template
        If a new repository is initialized, this argument will specify an
        alternate template directory.

        .. versionadded:: 2015.8.0

    separate_git_dir
        If a new repository is initialized, this argument will specify an
        alternate ``$GIT_DIR``

        .. versionadded:: 2015.8.0

    shared
        Set sharing permissions on git repo. See `git-init(1)`_ for more
        details.

        .. versionadded:: 2015.5.0

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

        .. versionadded:: 0.17.0

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-init(1)`: http://git-scm.com/docs/git-init
    .. _`worktree`: http://git-scm.com/docs/git-worktree
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    # If the named directory is a git repo return True.  A bare repo is
    # recognized by a HEAD file directly inside ``name``; a non-bare repo by a
    # .git subdirectory or by being a git worktree.
    if os.path.isdir(name):
        if bare and os.path.isfile(os.path.join(name, 'HEAD')):
            return ret
        elif not bare and \
                (os.path.isdir(os.path.join(name, '.git')) or
                 __salt__['git.is_worktree'](name,
                                             user=user,
                                             password=password,
                                             output_encoding=output_encoding)):
            return ret
        # Directory exists and is not a git repo, if force is set destroy the
        # directory and recreate, otherwise throw an error
        elif force:
            # Directory exists, and the ``force`` option is enabled, so we
            # need to clear out its contents to proceed.
            if __opts__['test']:
                ret['changes']['new'] = name
                ret['changes']['forced init'] = True
                return _neutral_test(
                    ret,
                    'Target directory {0} exists. Since force=True, the '
                    'contents of {0} would be deleted, and a {1}repository '
                    'would be initialized in its place.'
                    .format(name, 'bare ' if bare else '')
                )
            log.debug(
                'Removing contents of %s to initialize %srepository in its '
                'place (force=True set in git.present state)',
                name, 'bare ' if bare else ''
            )
            try:
                # A symlink is simply unlinked; a real directory is removed
                # recursively.
                if os.path.islink(name):
                    os.unlink(name)
                else:
                    salt.utils.files.rm_rf(name)
            except OSError as exc:
                return _fail(
                    ret,
                    'Unable to remove {0}: {1}'.format(name, exc)
                )
            else:
                ret['changes']['forced init'] = True
        elif os.listdir(name):
            # Non-empty, non-git directory and force is off: refuse to touch it.
            return _fail(
                ret,
                'Target \'{0}\' exists, is non-empty, and is not a git '
                'repository. Set the \'force\' option to True to remove '
                'this directory\'s contents and proceed with initializing a '
                'repository'.format(name)
            )

    # Run test is set
    if __opts__['test']:
        ret['changes']['new'] = name
        return _neutral_test(
            ret,
            'New {0}repository would be created'.format(
                'bare ' if bare else ''
            )
        )

    __salt__['git.init'](cwd=name,
                         bare=bare,
                         template=template,
                         separate_git_dir=separate_git_dir,
                         shared=shared,
                         user=user,
                         password=password,
                         output_encoding=output_encoding)

    actions = [
        'Initialized {0}repository in {1}'.format(
            'bare ' if bare else '',
            name
        )
    ]
    if template:
        actions.append('Template directory set to {0}'.format(template))
    if separate_git_dir:
        actions.append('Gitdir set to {0}'.format(separate_git_dir))
    message = '. '.join(actions)
    if len(actions) > 1:
        message += '.'
    log.info(message)
    ret['changes']['new'] = name
    ret['comment'] = message
    return ret
def detached(name,
             rev,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=None,
             unless=None,
             output_encoding=None,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``rev``.

    name
        Address of the remote repository.

    rev
        The branch, tag, or commit ID to checkout after clone.
        If a branch or tag is specified it will be resolved to a commit ID
        and checked out.

    target
        Name of the target directory where repository is about to be cloned.

    remote : origin
        Git remote to use. If this state needs to clone the repo, it will clone
        it using this value as the initial remote name. If the repository
        already exists, and a remote by this name is not present, one will be
        added.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    force_clone : False
        If the ``target`` directory exists and is not a git repository, then
        this state will fail. Set this argument to ``True`` to remove the
        contents of the target directory and clone the repo into it.

    force_checkout : False
        When checking out the revision ID, the state will fail if there are
        unwritten changes. Set this argument to ``True`` to discard unwritten
        changes when checking out.

    fetch_remote : True
        If ``False`` a fetch will not be performed and only local refs
        will be reachable.

    hard_reset : False
        If ``True`` a hard reset will be performed before the checkout and any
        uncommitted modifications to the working directory will be discarded.
        Untracked files will remain in place.

        .. note::
            Changes resulting from a hard reset will not trigger requisites.

    submodules : False
        Update submodules

    identity
        A path on the minion (or a SaltStack fileserver URL, e.g.
        ``salt://path/to/identity_file``) to a private key to use for SSH
        authentication.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true

    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not rev:
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev)
        )

    if not target:
        # Fixed: this error message previously interpolated ``rev`` instead of
        # ``target``, producing a misleading failure comment.
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'target\' '
            'argument'.format(target)
        )

    # Ensure that certain arguments are strings to ensure that comparisons work
    if not isinstance(rev, six.string_types):
        rev = six.text_type(rev)

    if target is not None:
        if not isinstance(target, six.string_types):
            target = six.text_type(target)
        if not os.path.isabs(target):
            return _fail(
                ret,
                'Target \'{0}\' is not an absolute path'.format(target)
            )
    if user is not None and not isinstance(user, six.string_types):
        user = six.text_type(user)
    if remote is not None and not isinstance(remote, six.string_types):
        remote = six.text_type(remote)
    if identity is not None:
        if isinstance(identity, six.string_types):
            identity = [identity]
        elif not isinstance(identity, list):
            return _fail(ret, 'Identity must be either a list or a string')

        identity = [os.path.expanduser(x) for x in identity]
        for ident_path in identity:
            if 'salt://' in ident_path:
                # Cache fileserver-hosted keys locally before use
                try:
                    ident_path = __salt__['cp.cache_file'](ident_path)
                except IOError as exc:
                    log.error('Failed to cache %s: %s', ident_path, exc)
                    return _fail(
                        ret,
                        'Identity \'{0}\' does not exist.'.format(
                            ident_path
                        )
                    )
            if not os.path.isabs(ident_path):
                return _fail(
                    ret,
                    'Identity \'{0}\' is not an absolute path'.format(
                        ident_path
                    )
                )
    if https_user is not None and not isinstance(https_user, six.string_types):
        https_user = six.text_type(https_user)
    if https_pass is not None and not isinstance(https_pass, six.string_types):
        https_pass = six.text_type(https_pass)

    if os.path.isfile(target):
        return _fail(
            ret,
            'Target \'{0}\' exists and is a regular file, cannot proceed'
            .format(target)
        )

    # Called for validation only: raises ValueError for malformed URL /
    # credential combinations. The return value itself is not needed here
    # (a previously-assigned, unused ``redacted_fetch_url`` local was removed).
    try:
        salt.utils.url.add_http_basic_auth(name,
                                           https_user,
                                           https_pass,
                                           https_only=True)
    except ValueError as exc:
        return _fail(ret, exc.__str__())

    # Check if onlyif or unless conditions match
    run_check_cmd_kwargs = {'runas': user}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    cret = mod_run_check(
        run_check_cmd_kwargs, onlyif, unless
    )
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    # Determine if supplied ref is a hash (40 or fewer hex digits)
    remote_rev_type = 'ref'
    if len(rev) <= 40 \
            and all(x in string.hexdigits for x in rev):
        rev = rev.lower()
        remote_rev_type = 'hash'

    comments = []
    hash_exists_locally = False
    local_commit_id = None

    gitdir = os.path.join(target, '.git')
    if os.path.isdir(gitdir) \
            or __salt__['git.is_worktree'](target,
                                           user=user,
                                           password=password,
                                           output_encoding=output_encoding):
        # Target directory is a git repository or git worktree
        local_commit_id = _get_local_rev_and_branch(
            target,
            user,
            password,
            output_encoding=output_encoding)[0]

        # NOTE: compare with ==, not ``is`` -- string identity is a CPython
        # implementation detail and emits a SyntaxWarning on modern Pythons.
        if remote_rev_type == 'hash':
            try:
                __salt__['git.describe'](target,
                                         rev,
                                         user=user,
                                         password=password,
                                         ignore_retcode=True,
                                         output_encoding=output_encoding)
            except CommandExecutionError:
                hash_exists_locally = False
            else:
                # The rev is a hash and it exists locally so skip to checkout
                hash_exists_locally = True
        else:
            # Check that remote is present and set to correct url
            remotes = __salt__['git.remotes'](target,
                                              user=user,
                                              password=password,
                                              redact_auth=False,
                                              output_encoding=output_encoding)

            if remote in remotes and name in remotes[remote]['fetch']:
                pass
            else:
                # The fetch_url for the desired remote does not match the
                # specified URL (or the remote does not exist), so set the
                # remote URL.
                current_fetch_url = None
                if remote in remotes:
                    current_fetch_url = remotes[remote]['fetch']

                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Remote {0} would be set to {1}'.format(
                            remote, name
                        )
                    )

                __salt__['git.remote_set'](target,
                                           url=name,
                                           remote=remote,
                                           user=user,
                                           password=password,
                                           https_user=https_user,
                                           https_pass=https_pass,
                                           output_encoding=output_encoding)
                comments.append(
                    'Remote {0} updated from \'{1}\' to \'{2}\''.format(
                        remote,
                        current_fetch_url,
                        name
                    )
                )

    else:
        # Clone repository
        if os.path.isdir(target):
            target_contents = os.listdir(target)
            if force_clone:
                # Clone is required, and target directory exists, but the
                # ``force`` option is enabled, so we need to clear out its
                # contents to proceed.
                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Target directory {0} exists. Since force_clone=True, '
                        'the contents of {0} would be deleted, and {1} would '
                        'be cloned into this directory.'.format(target, name)
                    )
                log.debug(
                    'Removing contents of %s to clone repository %s in its '
                    'place (force_clone=True set in git.detached state)',
                    target, name
                )
                removal_errors = {}
                for target_object in target_contents:
                    target_path = os.path.join(target, target_object)
                    try:
                        salt.utils.files.rm_rf(target_path)
                    except OSError as exc:
                        # Already-gone entries are fine; report the rest
                        if exc.errno != errno.ENOENT:
                            removal_errors[target_path] = exc
                if removal_errors:
                    err_strings = [
                        '  {0}\n    {1}'.format(k, v)
                        for k, v in six.iteritems(removal_errors)
                    ]
                    return _fail(
                        ret,
                        'Unable to remove\n{0}'.format('\n'.join(err_strings)),
                        comments
                    )
                ret['changes']['forced clone'] = True
            elif target_contents:
                # Clone is required, but target dir exists and is non-empty. We
                # can't proceed.
                return _fail(
                    ret,
                    'Target \'{0}\' exists, is non-empty and is not a git '
                    'repository. Set the \'force_clone\' option to True to '
                    'remove this directory\'s contents and proceed with '
                    'cloning the remote repository'.format(target)
                )

        log.debug('Target %s is not found, \'git clone\' is required', target)
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository {0} would be cloned to {1}'.format(
                    name, target
                )
            )
        try:
            # Clone without checking out; the detached checkout happens below
            clone_opts = ['--no-checkout']
            if remote != 'origin':
                clone_opts.extend(['--origin', remote])

            __salt__['git.clone'](target,
                                  name,
                                  user=user,
                                  password=password,
                                  opts=clone_opts,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  saltenv=__env__,
                                  output_encoding=output_encoding)
            comments.append('{0} cloned to {1}'.format(name, target))

        except Exception as exc:
            log.error(
                'Unexpected exception in git.detached state',
                exc_info=True
            )
            if isinstance(exc, CommandExecutionError):
                msg = _strip_exc(exc)
            else:
                msg = six.text_type(exc)
            return _fail(ret, msg, comments)

    # Repository exists and is ready for fetch/checkout
    refspecs = [
        'refs/heads/*:refs/remotes/{0}/*'.format(remote),
        '+refs/tags/*:refs/tags/*'
    ]
    if hash_exists_locally or fetch_remote is False:
        # Nothing to fetch: either the requested hash is already present
        # locally, or fetching was explicitly disabled.
        pass
    else:
        # Fetch refs from remote
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository remote {0} would be fetched'.format(remote)
            )
        try:
            fetch_changes = __salt__['git.fetch'](
                target,
                remote=remote,
                force=True,
                refspecs=refspecs,
                user=user,
                password=password,
                identity=identity,
                saltenv=__env__,
                output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Fetch failed'
            msg += ':\n\n' + six.text_type(exc)
            return _fail(ret, msg, comments)
        else:
            if fetch_changes:
                comments.append(
                    'Remote {0} was fetched, resulting in updated '
                    'refs'.format(remote)
                )

    # Resolve rev to a commit ID and check it out (detached HEAD)
    checkout_commit_id = ''
    # Fixed: was ``remote_rev_type is 'hash'`` (string identity comparison)
    if remote_rev_type == 'hash':
        if __salt__['git.describe'](
                target,
                rev,
                user=user,
                password=password,
                output_encoding=output_encoding):
            checkout_commit_id = rev
        else:
            return _fail(
                ret,
                'Revision \'{0}\' does not exist'.format(rev)
            )
    else:
        try:
            all_remote_refs = __salt__['git.remote_refs'](
                target,
                user=user,
                password=password,
                identity=identity,
                https_user=https_user,
                https_pass=https_pass,
                ignore_retcode=False,
                output_encoding=output_encoding)

            # Branches take precedence over tags of the same name
            if 'refs/remotes/' + remote + '/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs[
                    'refs/remotes/' + remote + '/' + rev]
            elif 'refs/tags/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/tags/' + rev]
            else:
                return _fail(
                    ret,
                    'Revision \'{0}\' does not exist'.format(rev)
                )
        except CommandExecutionError as exc:
            return _fail(
                ret,
                'Failed to list refs for {0}: {1}'.format(
                    remote, _strip_exc(exc))
            )

    if hard_reset:
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Hard reset to HEAD would be performed on {0}'.format(target)
            )
        __salt__['git.reset'](
            target,
            opts=['--hard', 'HEAD'],
            user=user,
            password=password,
            output_encoding=output_encoding)
        comments.append(
            'Repository was reset to HEAD before checking out revision'
        )

    # TODO: implement clean function for git module and add clean flag

    if checkout_commit_id == local_commit_id:
        new_rev = None
    else:
        if __opts__['test']:
            ret['changes']['HEAD'] = {'old': local_commit_id,
                                      'new': checkout_commit_id}
            return _neutral_test(
                ret,
                'Commit ID {0} would be checked out at {1}'.format(
                    checkout_commit_id,
                    target
                )
            )
        __salt__['git.checkout'](target,
                                 checkout_commit_id,
                                 force=force_checkout,
                                 user=user,
                                 password=password,
                                 output_encoding=output_encoding)
        comments.append(
            'Commit ID {0} was checked out at {1}'.format(
                checkout_commit_id,
                target
            )
        )

        try:
            new_rev = __salt__['git.revision'](
                cwd=target,
                user=user,
                password=password,
                ignore_retcode=True,
                output_encoding=output_encoding)
        except CommandExecutionError:
            new_rev = None

    if submodules:
        __salt__['git.submodule'](target,
                                  'update',
                                  opts=['--init', '--recursive'],
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  output_encoding=output_encoding)
        comments.append(
            'Submodules were updated'
        )

    if new_rev is not None:
        ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
    else:
        comments.append("Already checked out at correct revision")

    msg = _format_comments(comments)
    log.info(msg)
    ret['comment'] = msg

    return ret
def cloned(name,
           target=None,
           branch=None,
           user=None,
           password=None,
           identity=None,
           https_user=None,
           https_pass=None,
           output_encoding=None):
    '''
    .. versionadded:: 2018.3.3,2019.2.0

    Ensure that a repository has been cloned to the specified target directory.
    If not, clone that repository. No fetches will be performed once cloned.

    name
        Address of the remote repository

    target
        Name of the target directory where repository should be cloned

    branch
        Remote branch to check out. If unspecified, the default branch (i.e.
        the one to the remote HEAD points) will be checked out.

        .. note::
            The local branch name will match the remote branch name. If the
            branch name is changed, then that branch will be checked out
            locally, but keep in mind that remote repository will not be
            fetched. If your use case requires that you keep the clone up to
            date with the remote repository, then consider using
            :py:func:`git.latest <salt.states.git.latest>`.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

    identity
        Path to a private key to use for ssh URLs. Works the same way as in
        :py:func:`git.latest <salt.states.git.latest>`, see that state's
        documentation for more information.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Argument validation: target is mandatory and must be an absolute path
    if target is None:
        ret['comment'] = '\'target\' argument is required'
        return ret
    elif not isinstance(target, six.string_types):
        target = six.text_type(target)

    if not os.path.isabs(target):
        ret['comment'] = '\'target\' path must be absolute'
        return ret

    if branch is not None:
        if not isinstance(branch, six.string_types):
            branch = six.text_type(branch)
        if not branch:
            ret['comment'] = 'Invalid \'branch\' argument'
            return ret

    # A clone is needed when the target does not exist. If it does exist,
    # git.status is used as a probe that it is a usable git repository.
    if not os.path.exists(target):
        need_clone = True
    else:
        try:
            __salt__['git.status'](target,
                                   user=user,
                                   password=password,
                                   output_encoding=output_encoding)
        except Exception as exc:
            ret['comment'] = six.text_type(exc)
            return ret
        else:
            need_clone = False

    comments = []

    # Small closures to record changes consistently in both the test-mode
    # and real-run code paths below.
    def _clone_changes(ret):
        ret['changes']['new'] = name + ' => ' + target

    def _branch_changes(ret, old, new):
        ret['changes']['branch'] = {'old': old, 'new': new}

    if need_clone:
        if __opts__['test']:
            _clone_changes(ret)
            comment = '{0} would be cloned to {1}{2}'.format(
                name,
                target,
                ' with branch \'{0}\''.format(branch)
                    if branch is not None
                    else ''
            )
            return _neutral_test(ret, comment)
        clone_opts = ['--branch', branch] if branch is not None else None
        try:
            __salt__['git.clone'](target,
                                  name,
                                  opts=clone_opts,
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Clone failed: {0}'.format(_strip_exc(exc))
            return _fail(ret, msg, comments)

        comments.append(
            '{0} cloned to {1}{2}'.format(
                name,
                target,
                ' with branch \'{0}\''.format(branch)
                    if branch is not None
                    else ''
            )
        )
        _clone_changes(ret)
        ret['comment'] = _format_comments(comments)
        ret['result'] = True
        return ret
    else:
        # Already cloned. With no branch requested, nothing more to check;
        # otherwise make sure the requested branch is the one checked out.
        if branch is None:
            return _already_cloned(ret, target, branch, comments)
        else:
            current_branch = __salt__['git.current_branch'](
                target,
                user=user,
                password=password,
                output_encoding=output_encoding)
            if current_branch == branch:
                return _already_cloned(ret, target, branch, comments)
            else:
                if __opts__['test']:
                    _branch_changes(ret, current_branch, branch)
                    return _neutral_test(
                        ret,
                        'Branch would be changed to \'{0}\''.format(branch))

                try:
                    __salt__['git.rev_parse'](
                        target,
                        rev=branch,
                        user=user,
                        password=password,
                        ignore_retcode=True,
                        output_encoding=output_encoding)
                except CommandExecutionError:
                    # Local head does not exist, so we need to check out a new
                    # branch at the remote rev
                    checkout_rev = '/'.join(('origin', branch))
                    checkout_opts = ['-b', branch]
                else:
                    # Local head exists, so we just need to check it out
                    checkout_rev = branch
                    checkout_opts = None

                try:
                    __salt__['git.checkout'](
                        target,
                        rev=checkout_rev,
                        opts=checkout_opts,
                        user=user,
                        password=password,
                        output_encoding=output_encoding)
                except CommandExecutionError as exc:
                    msg = 'Failed to change branch to \'{0}\': {1}'.format(
                        branch, exc)
                    return _fail(ret, msg, comments)
                else:
                    comments.append(
                        'Branch changed to \'{0}\''.format(branch))
                    _branch_changes(ret, current_branch, branch)
                    ret['comment'] = _format_comments(comments)
                    ret['result'] = True
                    return ret
def config_unset(name,
                 value_regex=None,
                 repo=None,
                 user=None,
                 password=None,
                 output_encoding=None,
                 **kwargs):
    r'''
    .. versionadded:: 2015.8.0

    Ensure that the named config key is not present

    name
        The name of the configuration key to unset. This value can be a regex,
        but the regex must match the entire key name. For example, ``foo\.``
        would not match all keys in the ``foo`` section, it would be necessary
        to use ``foo\..+`` to do so.

    value_regex
        Regex indicating the values to unset for the matching key(s)

        .. note::
            This option behaves differently depending on whether or not ``all``
            is set to ``True``. If it is, then all values matching the regex
            will be deleted (this is the only way to delete multiple values
            from a multivar). If ``all`` is set to ``False``, then this state
            will fail if the regex matches more than one value in a multivar.

    all : False
        If ``True``, unset all matches

    repo
        Location of the git repository for which the config value should be
        set. Required unless ``global`` is set to ``True``.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    global : False
        If ``True``, this will set a global git config option

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    **Examples:**

    .. code-block:: yaml

        # Value matching 'baz'
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - value_regex: 'baz'
            - repo: /path/to/repo

        # Ensure entire multivar is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - all: True

        # Ensure all variables in 'foo' section are unset, including multivars
        mylocalrepo:
          git.config_unset:
            - name: 'foo\..+'
            - all: True

        # Ensure that global config value is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - global: True
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No matching keys are set'}

    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'global' as an argument to this function without
    # shadowing global(), while also not allowing unwanted arguments to be
    # passed.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    all_ = kwargs.pop('all', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            'Non-global config options require the \'repo\' argument to be '
            'set'
        )

    if not isinstance(name, six.string_types):
        name = six.text_type(name)
    if value_regex is not None:
        if not isinstance(value_regex, six.string_types):
            value_regex = six.text_type(value_regex)

    # Ensure that the key regex matches the full key name
    key = '^' + name.lstrip('^').rstrip('$') + '$'

    # Get matching keys/values
    pre_matches = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=value_regex,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    if not pre_matches:
        # No changes need to be made
        return ret

    # Perform sanity check on the matches. We can't proceed if the value_regex
    # matches more than one value in a given key, and 'all' is not set to True
    if not all_:
        greedy_matches = ['{0} ({1})'.format(x, ', '.join(y))
                          for x, y in six.iteritems(pre_matches)
                          if len(y) > 1]
        if greedy_matches:
            if value_regex is not None:
                return _fail(
                    ret,
                    'Multiple values are matched by value_regex for the '
                    'following keys (set \'all\' to True to force removal): '
                    '{0}'.format('; '.join(greedy_matches))
                )
            else:
                return _fail(
                    ret,
                    'Multivar(s) matched by the key expression (set \'all\' '
                    'to True to force removal): {0}'.format(
                        '; '.join(greedy_matches)
                    )
                )

    if __opts__['test']:
        ret['changes'] = pre_matches
        return _neutral_test(
            ret,
            '{0} key(s) would have value(s) unset'.format(len(pre_matches))
        )

    if value_regex is None:
        pre = pre_matches
    else:
        # Get all keys matching the key expression, so we can accurately report
        # on changes made.
        pre = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=None,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding,
            **{'global': global_}
        )

    failed = []
    # Unset the specified value(s). There is no unset for regexes so loop
    # through the pre_matches dict and unset each matching key individually.
    for key_name in pre_matches:
        try:
            __salt__['git.config_unset'](
                cwd=repo,
                key=name,
                value_regex=value_regex,
                all=all_,
                user=user,
                password=password,
                output_encoding=output_encoding,
                **{'global': global_}
            )
        except CommandExecutionError as exc:
            msg = 'Failed to unset \'{0}\''.format(key_name)
            if value_regex is not None:
                # Fixed: this previously appended a literal '{1}' placeholder
                # that was never formatted; interpolate the actual regex.
                msg += ' using value_regex \'{0}\''.format(value_regex)
            msg += ': ' + _strip_exc(exc)
            log.error(msg)
            failed.append(key_name)

    if failed:
        return _fail(
            ret,
            'Error(s) occurred unsetting values for the following keys (see '
            'the minion log for details): {0}'.format(', '.join(failed))
        )

    post = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=None,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    for key_name in pre:
        if key_name not in post:
            # Entire key was removed; report all of its former values.
            # Fixed: guarded with if/else so that post[key_name] is never
            # indexed for a key that no longer exists (would raise KeyError).
            ret['changes'][key_name] = pre[key_name]
        else:
            unset = [x for x in pre[key_name] if x not in post[key_name]]
            if unset:
                ret['changes'][key_name] = unset

    if value_regex is None:
        post_matches = post
    else:
        post_matches = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=value_regex,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding,
            **{'global': global_}
        )

    if post_matches:
        failed = ['{0} ({1})'.format(x, ', '.join(y))
                  for x, y in six.iteritems(post_matches)]
        return _fail(
            ret,
            'Failed to unset value(s): {0}'.format('; '.join(failed))
        )

    ret['comment'] = 'Value(s) successfully unset'
    return ret
pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. 
versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. 
This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, 
value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} 
return True
saltstack/salt
salt/states/git.py
present
python
def present(name, force=False, bare=True, template=None, separate_git_dir=None, shared=None, user=None, password=None, output_encoding=None): ''' Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. 
versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. _`worktree`: http://git-scm.com/docs/git-worktree ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # If the named directory is a git repo return True if os.path.isdir(name): if bare and os.path.isfile(os.path.join(name, 'HEAD')): return ret elif not bare and \ (os.path.isdir(os.path.join(name, '.git')) or __salt__['git.is_worktree'](name, user=user, password=password, output_encoding=output_encoding)): return ret # Directory exists and is not a git repo, if force is set destroy the # directory and recreate, otherwise throw an error elif force: # Directory exists, and the ``force`` option is enabled, so we need # to clear out its contents to proceed. if __opts__['test']: ret['changes']['new'] = name ret['changes']['forced init'] = True return _neutral_test( ret, 'Target directory {0} exists. Since force=True, the ' 'contents of {0} would be deleted, and a {1}repository ' 'would be initialized in its place.' .format(name, 'bare ' if bare else '') ) log.debug( 'Removing contents of %s to initialize %srepository in its ' 'place (force=True set in git.present state)', name, 'bare ' if bare else '' ) try: if os.path.islink(name): os.unlink(name) else: salt.utils.files.rm_rf(name) except OSError as exc: return _fail( ret, 'Unable to remove {0}: {1}'.format(name, exc) ) else: ret['changes']['forced init'] = True elif os.listdir(name): return _fail( ret, 'Target \'{0}\' exists, is non-empty, and is not a git ' 'repository. 
Set the \'force\' option to True to remove ' 'this directory\'s contents and proceed with initializing a ' 'repository'.format(name) ) # Run test is set if __opts__['test']: ret['changes']['new'] = name return _neutral_test( ret, 'New {0}repository would be created'.format( 'bare ' if bare else '' ) ) __salt__['git.init'](cwd=name, bare=bare, template=template, separate_git_dir=separate_git_dir, shared=shared, user=user, password=password, output_encoding=output_encoding) actions = [ 'Initialized {0}repository in {1}'.format( 'bare ' if bare else '', name ) ] if template: actions.append('Template directory set to {0}'.format(template)) if separate_git_dir: actions.append('Gitdir set to {0}'.format(separate_git_dir)) message = '. '.join(actions) if len(actions) > 1: message += '.' log.info(message) ret['changes']['new'] = name ret['comment'] = message return ret
Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. _`worktree`: http://git-scm.com/docs/git-worktree
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L2063-L2235
[ "def _neutral_test(ret, comment):\n ret['result'] = None\n ret['comment'] = comment\n return ret\n" ]
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) if submodules and remote_rev: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) msg = _format_comments(comments) log.info(msg) ret['comment'] = msg if new_rev is not None: ret['changes']['revision'] = {'old': None, 'new': new_rev} return ret def detached(name, rev, target=None, remote='origin', user=None, password=None, force_clone=False, force_checkout=False, fetch_remote=True, hard_reset=False, submodules=False, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2016.3.0 Make sure a repository is cloned to the given target directory and is a detached HEAD checkout of the commit ID resolved from ``rev``. 
name Address of the remote repository. rev The branch, tag, or commit ID to checkout after clone. If a branch or tag is specified it will be resolved to a commit ID and checked out. target Name of the target directory where repository is about to be cloned. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_checkout : False When checking out the revision ID, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. fetch_remote : True If ``False`` a fetch will not be performed and only local refs will be reachable. hard_reset : False If ``True`` a hard reset will be performed before the checkout and any uncommitted modifications to the working directory will be discarded. Untracked files will remain in place. .. note:: Changes resulting from a hard reset will not trigger requisites. submodules : False Update submodules identity A path on the minion (or a SaltStack fileserver URL, e.g. ``salt://path/to/identity_file``) to a private key to use for SSH authentication. 
https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if not target: return _fail( ret, '\'{0}\' is not a valid value for the \'target\' argument'.format(rev) ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'Target \'{0}\' is not an absolute path'.format(target) ) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'Identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = 
__salt__['cp.cache_file'](ident_path) except IOError as exc: log.error('Failed to cache %s: %s', ident_path, exc) return _fail( ret, 'Identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'Identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url) # Check if onlyif or unless conditions match run_check_cmd_kwargs = {'runas': user} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret # Determine if supplied ref is a hash remote_rev_type = 'ref' if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): rev = rev.lower() remote_rev_type = 'hash' comments = [] hash_exists_locally = False local_commit_id = None gitdir = os.path.join(target, '.git') if os.path.isdir(gitdir) \ or __salt__['git.is_worktree'](target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree local_commit_id = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding)[0] if remote_rev_type is 'hash': try: __salt__['git.describe'](target, rev, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: hash_exists_locally = False else: # The rev is a hash and 
it exists locally so skip to checkout hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) if remote in remotes and name in remotes[remote]['fetch']: pass else: # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. current_fetch_url = None if remote in remotes: current_fetch_url = remotes[remote]['fetch'] if __opts__['test']: return _neutral_test( ret, 'Remote {0} would be set to {1}'.format( remote, name ) ) __salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) comments.append( 'Remote {0} updated from \'{1}\' to \'{2}\''.format( remote, current_fetch_url, name ) ) else: # Clone repository if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. if __opts__['test']: return _neutral_test( ret, 'Target directory {0} exists. 
Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.detached state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True elif target_contents: # Clone is required, but target dir exists and is non-empty. We # can't proceed. return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--no-checkout'] if remote != 'origin': clone_opts.extend(['--origin', remote]) __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) comments.append('{0} cloned to {1}'.format(name, target)) except Exception as exc: log.error( 'Unexpected exception in git.detached state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) # Repository exists and is ready for fetch/checkout refspecs = [ 'refs/heads/*:refs/remotes/{0}/*'.format(remote), '+refs/tags/*:refs/tags/*' ] if 
hash_exists_locally or fetch_remote is False: pass else: # Fetch refs from remote if __opts__['test']: return _neutral_test( ret, 'Repository remote {0} would be fetched'.format(remote) ) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=True, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Fetch failed' msg += ':\n\n' + six.text_type(exc) return _fail(ret, msg, comments) else: if fetch_changes: comments.append( 'Remote {0} was fetched, resulting in updated ' 'refs'.format(remote) ) # get refs and checkout checkout_commit_id = '' if remote_rev_type is 'hash': if __salt__['git.describe']( target, rev, user=user, password=password, output_encoding=output_encoding): checkout_commit_id = rev else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) else: try: all_remote_refs = __salt__['git.remote_refs']( target, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, output_encoding=output_encoding) if 'refs/remotes/'+remote+'/'+rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev] elif 'refs/tags/' + rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/tags/' + rev] else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) except CommandExecutionError as exc: return _fail( ret, 'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc)) ) if hard_reset: if __opts__['test']: return _neutral_test( ret, 'Hard reset to HEAD would be performed on {0}'.format(target) ) __salt__['git.reset']( target, opts=['--hard', 'HEAD'], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to HEAD before checking out revision' ) # TODO: implement clean function for git module and add clean flag if checkout_commit_id == local_commit_id: new_rev = None 
else: if __opts__['test']: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id} return _neutral_test( ret, 'Commit ID {0} would be checked out at {1}'.format( checkout_commit_id, target ) ) __salt__['git.checkout'](target, checkout_commit_id, force=force_checkout, user=user, password=password, output_encoding=output_encoding) comments.append( 'Commit ID {0} was checked out at {1}'.format( checkout_commit_id, target ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None if submodules: __salt__['git.submodule'](target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) comments.append( 'Submodules were updated' ) if new_rev is not None: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev} else: comments.append("Already checked out at correct revision") msg = _format_comments(comments) log.info(msg) ret['comment'] = msg return ret def cloned(name, target=None, branch=None, user=None, password=None, identity=None, https_user=None, https_pass=None, output_encoding=None): ''' .. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. 
            If your use case requires that you keep the clone up to date with
            the remote repository, then consider using :py:func:`git.latest
            <salt.states.git.latest>`.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

    identity
        Path to a private key to use for ssh URLs. Works the same way as in
        :py:func:`git.latest <salt.states.git.latest>`, see that state's
        documentation for more information.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.
    '''
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {}}

    # Argument validation: target is mandatory and must be an absolute path
    if target is None:
        ret['comment'] = '\'target\' argument is required'
        return ret
    elif not isinstance(target, six.string_types):
        target = six.text_type(target)

    if not os.path.isabs(target):
        ret['comment'] = '\'target\' path must be absolute'
        return ret

    if branch is not None:
        if not isinstance(branch, six.string_types):
            branch = six.text_type(branch)
        if not branch:
            ret['comment'] = 'Invalid \'branch\' argument'
            return ret

    # A clone is needed when the target does not exist; if it exists it must
    # already be a working git checkout (git.status succeeding).
    if not os.path.exists(target):
        need_clone = True
    else:
        try:
            __salt__['git.status'](target,
                                   user=user,
                                   password=password,
                                   output_encoding=output_encoding)
        except Exception as exc:
            ret['comment'] = six.text_type(exc)
            return ret
        else:
            need_clone = False

    comments = []

    # Small helpers to record the two possible kinds of change
    def _clone_changes(ret):
        ret['changes']['new'] = name + ' => ' + target

    def _branch_changes(ret, old, new):
        ret['changes']['branch'] = {'old': old, 'new': new}

    if need_clone:
        if __opts__['test']:
            _clone_changes(ret)
            comment = '{0} would be cloned to {1}{2}'.format(
                name,
                target,
                ' with branch \'{0}\''.format(branch)
                if branch is not None
                else ''
            )
            return _neutral_test(ret, comment)
        clone_opts = ['--branch', branch] if branch is not None else None
        try:
            __salt__['git.clone'](target,
                                  name,
                                  opts=clone_opts,
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Clone failed: {0}'.format(_strip_exc(exc))
            return _fail(ret, msg, comments)

        comments.append(
            '{0} cloned to {1}{2}'.format(
                name,
                target,
                ' with branch \'{0}\''.format(branch)
                if branch is not None
                else ''
            )
        )
        _clone_changes(ret)
        ret['comment'] = _format_comments(comments)
        ret['result'] = True
        return ret
    else:
        if branch is None:
            return _already_cloned(ret, target, branch, comments)
        else:
            current_branch = __salt__['git.current_branch'](
                target,
                user=user,
                password=password,
                output_encoding=output_encoding)
            if current_branch == branch:
                return _already_cloned(ret, target, branch, comments)
            else:
                if __opts__['test']:
                    _branch_changes(ret, current_branch, branch)
                    return _neutral_test(
                        ret,
                        'Branch would be changed to \'{0}\''.format(branch))

                try:
                    __salt__['git.rev_parse'](
                        target,
                        rev=branch,
                        user=user,
                        password=password,
                        ignore_retcode=True,
                        output_encoding=output_encoding)
                except CommandExecutionError:
                    # Local head does not exist, so we need to check out a new
                    # branch at the remote rev
                    # NOTE(review): this assumes the remote is named 'origin';
                    # confirm that matches the remote used during the clone.
                    checkout_rev = '/'.join(('origin', branch))
                    checkout_opts = ['-b', branch]
                else:
                    # Local head exists, so we just need to check it out
                    checkout_rev = branch
                    checkout_opts = None

                try:
                    __salt__['git.checkout'](
                        target,
                        rev=checkout_rev,
                        opts=checkout_opts,
                        user=user,
                        password=password,
                        output_encoding=output_encoding)
                except CommandExecutionError as exc:
                    msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc)
                    return _fail(ret, msg, comments)
                else:
                    comments.append('Branch changed to \'{0}\''.format(branch))
                    _branch_changes(ret, current_branch, branch)
                    ret['comment'] = _format_comments(comments)
                    ret['result'] = True
                    return ret


def config_unset(name,
                 value_regex=None,
                 repo=None,
                 user=None,
                 password=None,
                 output_encoding=None,
                 **kwargs):
    r'''
    .. versionadded:: 2015.8.0

    Ensure that the named config key is not present

    name
        The name of the configuration key to unset. This value can be a regex,
        but the regex must match the entire key name. For example, ``foo\.``
        would not match all keys in the ``foo`` section, it would be necessary
        to use ``foo\..+`` to do so.

    value_regex
        Regex indicating the values to unset for the matching key(s)

        .. note::
            This option behaves differently depending on whether or not ``all``
            is set to ``True``. If it is, then all values matching the regex
            will be deleted (this is the only way to delete multiple values
            from a multivar). If ``all`` is set to ``False``, then this state
            will fail if the regex matches more than one value in a multivar.
all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. 
pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. 
       versionchanged:: 2015.8.0
        Renamed from ``git.config`` to ``git.config_set``. For earlier
        versions, use ``git.config``.

    Ensure that a config value is set to the desired value(s)

    name
        Name of the git config value to set

    value
        Set a single value for the config item

    multivar
        Set multiple values for the config item

        .. note::
            The order matters here, if the same parameters are set but in a
            different order, they will be removed and replaced in the order
            specified.

        .. versionadded:: 2015.8.0

    repo
        Location of the git repository for which the config value should be
        set. Required unless ``global`` is set to ``True``.

    user
        User under which to run git commands. By default, the commands are run
        by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    global : False
        If ``True``, this will set a global git config option

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    **Local Config Example:**

    .. code-block:: yaml

        # Single value
        mylocalrepo:
          git.config_set:
            - name: user.email
            - value: foo@bar.net
            - repo: /path/to/repo

        # Multiple values
        mylocalrepo:
          git.config_set:
            - name: mysection.myattribute
            - multivar:
              - foo
              - bar
              - baz
            - repo: /path/to/repo

    **Global Config Example (User ``foo``):**

    .. code-block:: yaml

        mylocalrepo:
          git.config_set:
            - name: user.name
            - value: Foo Bar
            - user: foo
            - global: True
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    if value is not None and multivar is not None:
        return _fail(
            ret,
            'Only one of \'value\' and \'multivar\' is permitted'
        )

    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'global' as an argument to this function without
    # shadowing global(), while also not allowing unwanted arguments to be
    # passed.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            'Non-global config options require the \'repo\' argument to be '
            'set'
        )

    if not isinstance(name, six.string_types):
        name = six.text_type(name)
    # NOTE(review): if both 'value' and 'multivar' are None, neither 'desired'
    # nor 'value_comment' is ever bound and the comparison below raises
    # NameError — confirm callers always pass exactly one of them.
    if value is not None:
        if not isinstance(value, six.string_types):
            value = six.text_type(value)
        value_comment = '\'' + value + '\''
        desired = [value]
    if multivar is not None:
        if not isinstance(multivar, list):
            try:
                multivar = multivar.split(',')
            except AttributeError:
                multivar = six.text_type(multivar).split(',')
        else:
            new_multivar = []
            for item in multivar:
                if isinstance(item, six.string_types):
                    new_multivar.append(item)
                else:
                    new_multivar.append(six.text_type(item))
            multivar = new_multivar
        value_comment = multivar
        desired = multivar

    # Get current value
    pre = __salt__['git.config_get'](
        cwd=repo,
        key=name,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'all': True, 'global': global_}
    )

    if desired == pre:
        ret['comment'] = '{0}\'{1}\' is already set to {2}'.format(
            'Global key ' if global_ else '',
            name,
            value_comment
        )
        return ret

    if __opts__['test']:
        ret['changes'] = {'old': pre, 'new': desired}
        msg = '{0}\'{1}\' would be {2} {3}'.format(
            'Global key ' if global_ else '',
            name,
            'added as' if pre is None else 'set to',
            value_comment
        )
        return _neutral_test(ret, msg)

    try:
        # Set/update config value
        post = __salt__['git.config_set'](
            cwd=repo,
            key=name,
            value=value,
            multivar=multivar,
            user=user,
            password=password,
            output_encoding=output_encoding,
            **{'global': global_}
        )
    except CommandExecutionError as exc:
        return _fail(
            ret,
            'Failed to set {0}\'{1}\' to {2}: {3}'.format(
                'global key ' if global_ else '',
                name,
                value_comment,
                _strip_exc(exc)
            )
        )

    if pre != post:
        ret['changes'][name] = {'old': pre, 'new': post}

    # The config module returns the post-set value; if it does not match what
    # we asked for, the set did not take effect.
    if post != desired:
        return _fail(
            ret,
            'Failed to set {0}\'{1}\' to {2}'.format(
                'global key ' if global_ else '',
                name,
                value_comment
            )
        )

    ret['comment'] = '{0}\'{1}\' was {2} {3}'.format(
        'Global key ' if global_ else '',
        name,
        'added as' if pre is None else 'set to',
        value_comment
    )
    return ret


def mod_run_check(cmd_kwargs, onlyif, unless):
    '''
    Execute the onlyif and unless logic.
    Return a result dict if:
    * onlyif failed (onlyif != 0)
    * unless succeeded (unless == 0)
    Otherwise, returns ``True``
    '''
    # Work on a copy so the caller's kwargs dict is not mutated
    cmd_kwargs = copy.deepcopy(cmd_kwargs)
    cmd_kwargs.update({
        'use_vt': False,
        'bg': False,
        'ignore_retcode': True,
        'python_shell': True,
    })

    if onlyif is not None:
        if not isinstance(onlyif, list):
            onlyif = [onlyif]
        for command in onlyif:
            if not isinstance(command, six.string_types) and command:
                # Boolean or some other non-string which resolves to True
                continue
            try:
                if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0:
                    # Command exited with a zero retcode
                    continue
            except Exception as exc:
                log.exception(
                    'The following onlyif command raised an error: %s',
                    command
                )
                return {
                    'comment': 'onlyif raised error ({0}), see log for '
                               'more details'.format(exc),
                    'result': False
                }
            # This command exited nonzero, so the onlyif check fails
            return {'comment': 'onlyif condition is false',
                    'skip_watch': True,
                    'result': True}

    if unless is not None:
        if not isinstance(unless, list):
            unless = [unless]
        for command in unless:
            if not isinstance(command, six.string_types) and not command:
                # Boolean or some other non-string which resolves to False
                break
            try:
                if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0:
                    # Command exited with a non-zero retcode
                    break
            except Exception as exc:
                log.exception(
                    'The following unless command raised an error: %s',
                    command
                )
                return {
                    'comment': 'unless raised error ({0}), see log for '
                               'more details'.format(exc),
                    'result': False
                }
        else:
            # for-else: no break means every unless command succeeded,
            # which means the state should not run
            return {'comment': 'unless condition is true',
                    'skip_watch': True,
                    'result': True}

    # No condition blocked execution
    return True
saltstack/salt
salt/states/git.py
detached
python
def detached(name,
             rev,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=None,
             unless=None,
             output_encoding=None,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``rev``.

    name
        Address of the remote repository.

    rev
        The branch, tag, or commit ID to checkout after clone.
        If a branch or tag is specified it will be resolved to a commit ID
        and checked out.

    target
        Name of the target directory where repository is about to be cloned.

    remote : origin
        Git remote to use. If this state needs to clone the repo, it will clone
        it using this value as the initial remote name. If the repository
        already exists, and a remote by this name is not present, one will be
        added.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    force_clone : False
        If the ``target`` directory exists and is not a git repository, then
        this state will fail. Set this argument to ``True`` to remove the
        contents of the target directory and clone the repo into it.

    force_checkout : False
        When checking out the revision ID, the state will fail if there are
        unwritten changes. Set this argument to ``True`` to discard unwritten
        changes when checking out.

    fetch_remote : True
        If ``False`` a fetch will not be performed and only local refs
        will be reachable.

    hard_reset : False
        If ``True`` a hard reset will be performed before the checkout and any
        uncommitted modifications to the working directory will be discarded.
        Untracked files will remain in place.

        .. note::
            Changes resulting from a hard reset will not trigger requisites.

    submodules : False
        Update submodules

    identity
        A path on the minion (or a SaltStack fileserver URL, e.g.
        ``salt://path/to/identity_file``) to a private key to use for SSH
        authentication.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true

    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to
            handle Unicode characters.

        .. versionadded:: 2018.3.1
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not rev:
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev)
        )

    if not target:
        # NOTE: formerly formatted ``rev`` into this message; report the
        # actual offending value instead
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'target\' argument'.format(
                target)
        )

    # Ensure that certain arguments are strings to ensure that comparisons work
    if not isinstance(rev, six.string_types):
        rev = six.text_type(rev)
    if target is not None:
        if not isinstance(target, six.string_types):
            target = six.text_type(target)
        if not os.path.isabs(target):
            return _fail(
                ret,
                'Target \'{0}\' is not an absolute path'.format(target)
            )
    if user is not None and not isinstance(user, six.string_types):
        user = six.text_type(user)
    if remote is not None and not isinstance(remote, six.string_types):
        remote = six.text_type(remote)
    if identity is not None:
        if isinstance(identity, six.string_types):
            identity = [identity]
        elif not isinstance(identity, list):
            return _fail(ret, 'Identity must be either a list or a string')
        identity = [os.path.expanduser(x) for x in identity]
        for ident_path in identity:
            if 'salt://' in ident_path:
                try:
                    ident_path = __salt__['cp.cache_file'](ident_path)
                except IOError as exc:
                    log.error('Failed to cache %s: %s', ident_path, exc)
                    return _fail(
                        ret,
                        'Identity \'{0}\' does not exist.'.format(
                            ident_path
                        )
                    )
            if not os.path.isabs(ident_path):
                return _fail(
                    ret,
                    'Identity \'{0}\' is not an absolute path'.format(
                        ident_path
                    )
                )
    if https_user is not None and not isinstance(https_user, six.string_types):
        https_user = six.text_type(https_user)
    if https_pass is not None and not isinstance(https_pass, six.string_types):
        https_pass = six.text_type(https_pass)

    if os.path.isfile(target):
        return _fail(
            ret,
            'Target \'{0}\' exists and is a regular file, cannot proceed'
            .format(target)
        )

    # The URL is built only to validate the combination of repo URL and HTTP
    # basic-auth arguments; a ValueError means they are inconsistent.
    try:
        salt.utils.url.add_http_basic_auth(
            name,
            https_user,
            https_pass,
            https_only=True
        )
    except ValueError as exc:
        return _fail(ret, exc.__str__())

    # Check if onlyif or unless conditions match
    run_check_cmd_kwargs = {'runas': user}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    cret = mod_run_check(
        run_check_cmd_kwargs, onlyif, unless
    )
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    # Determine if supplied ref is a hash
    remote_rev_type = 'ref'
    if len(rev) <= 40 \
            and all(x in string.hexdigits for x in rev):
        rev = rev.lower()
        remote_rev_type = 'hash'

    comments = []
    hash_exists_locally = False
    local_commit_id = None

    gitdir = os.path.join(target, '.git')
    if os.path.isdir(gitdir) \
            or __salt__['git.is_worktree'](target,
                                           user=user,
                                           password=password,
                                           output_encoding=output_encoding):
        # Target directory is a git repository or git worktree
        local_commit_id = _get_local_rev_and_branch(
            target,
            user,
            password,
            output_encoding=output_encoding)[0]

        # NOTE: was ``remote_rev_type is 'hash'`` — identity comparison with a
        # string literal only worked via CPython interning; use equality.
        if remote_rev_type == 'hash':
            try:
                __salt__['git.describe'](target,
                                         rev,
                                         user=user,
                                         password=password,
                                         ignore_retcode=True,
                                         output_encoding=output_encoding)
            except CommandExecutionError:
                hash_exists_locally = False
            else:
                # The rev is a hash and it exists locally so skip to checkout
                hash_exists_locally = True
        else:
            # Check that remote is present and set to correct url
            remotes = __salt__['git.remotes'](target,
                                              user=user,
                                              password=password,
                                              redact_auth=False,
                                              output_encoding=output_encoding)

            if remote not in remotes or name not in remotes[remote]['fetch']:
                # The fetch_url for the desired remote does not match the
                # specified URL (or the remote does not exist), so set the
                # remote URL.
                current_fetch_url = None
                if remote in remotes:
                    current_fetch_url = remotes[remote]['fetch']

                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Remote {0} would be set to {1}'.format(
                            remote, name
                        )
                    )

                __salt__['git.remote_set'](target,
                                           url=name,
                                           remote=remote,
                                           user=user,
                                           password=password,
                                           https_user=https_user,
                                           https_pass=https_pass,
                                           output_encoding=output_encoding)
                comments.append(
                    'Remote {0} updated from \'{1}\' to \'{2}\''.format(
                        remote,
                        current_fetch_url,
                        name
                    )
                )
    else:
        # Clone repository
        if os.path.isdir(target):
            target_contents = os.listdir(target)
            if force_clone:
                # Clone is required, and target directory exists, but the
                # ``force`` option is enabled, so we need to clear out its
                # contents to proceed.
                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Target directory {0} exists. Since force_clone=True, '
                        'the contents of {0} would be deleted, and {1} would '
                        'be cloned into this directory.'.format(target, name)
                    )
                log.debug(
                    'Removing contents of %s to clone repository %s in its '
                    'place (force_clone=True set in git.detached state)',
                    target, name
                )
                removal_errors = {}
                for target_object in target_contents:
                    target_path = os.path.join(target, target_object)
                    try:
                        salt.utils.files.rm_rf(target_path)
                    except OSError as exc:
                        if exc.errno != errno.ENOENT:
                            removal_errors[target_path] = exc
                if removal_errors:
                    err_strings = [
                        ' {0}\n {1}'.format(k, v)
                        for k, v in six.iteritems(removal_errors)
                    ]
                    return _fail(
                        ret,
                        'Unable to remove\n{0}'.format('\n'.join(err_strings)),
                        comments
                    )
                ret['changes']['forced clone'] = True
            elif target_contents:
                # Clone is required, but target dir exists and is non-empty.
                # We can't proceed.
                return _fail(
                    ret,
                    'Target \'{0}\' exists, is non-empty and is not a git '
                    'repository. Set the \'force_clone\' option to True to '
                    'remove this directory\'s contents and proceed with '
                    'cloning the remote repository'.format(target)
                )

        log.debug('Target %s is not found, \'git clone\' is required', target)
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository {0} would be cloned to {1}'.format(
                    name, target
                )
            )
        try:
            # Clone with --no-checkout: the desired commit is checked out
            # explicitly below, after an optional fetch.
            clone_opts = ['--no-checkout']
            if remote != 'origin':
                clone_opts.extend(['--origin', remote])

            __salt__['git.clone'](target,
                                  name,
                                  user=user,
                                  password=password,
                                  opts=clone_opts,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  saltenv=__env__,
                                  output_encoding=output_encoding)
            comments.append('{0} cloned to {1}'.format(name, target))
        except Exception as exc:
            log.error(
                'Unexpected exception in git.detached state', exc_info=True
            )
            if isinstance(exc, CommandExecutionError):
                msg = _strip_exc(exc)
            else:
                msg = six.text_type(exc)
            return _fail(ret, msg, comments)

    # Repository exists and is ready for fetch/checkout
    refspecs = [
        'refs/heads/*:refs/remotes/{0}/*'.format(remote),
        '+refs/tags/*:refs/tags/*'
    ]
    # A fetch is skipped when the desired hash already exists locally, or
    # when the caller explicitly disabled it.
    if not hash_exists_locally and fetch_remote is not False:
        # Fetch refs from remote
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository remote {0} would be fetched'.format(remote)
            )
        try:
            fetch_changes = __salt__['git.fetch'](
                target,
                remote=remote,
                force=True,
                refspecs=refspecs,
                user=user,
                password=password,
                identity=identity,
                saltenv=__env__,
                output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Fetch failed'
            msg += ':\n\n' + six.text_type(exc)
            return _fail(ret, msg, comments)
        else:
            if fetch_changes:
                comments.append(
                    'Remote {0} was fetched, resulting in updated '
                    'refs'.format(remote)
                )

    # Get refs and checkout
    checkout_commit_id = ''
    if remote_rev_type == 'hash':
        if __salt__['git.describe'](
                target,
                rev,
                user=user,
                password=password,
                output_encoding=output_encoding):
            checkout_commit_id = rev
        else:
            return _fail(
                ret,
                'Revision \'{0}\' does not exist'.format(rev)
            )
    else:
        try:
            all_remote_refs = __salt__['git.remote_refs'](
                target,
                user=user,
                password=password,
                identity=identity,
                https_user=https_user,
                https_pass=https_pass,
                ignore_retcode=False,
                output_encoding=output_encoding)

            # Prefer a remote branch ref; fall back to a tag of that name
            if 'refs/remotes/' + remote + '/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs[
                    'refs/remotes/' + remote + '/' + rev]
            elif 'refs/tags/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/tags/' + rev]
            else:
                return _fail(
                    ret,
                    'Revision \'{0}\' does not exist'.format(rev)
                )
        except CommandExecutionError as exc:
            return _fail(
                ret,
                'Failed to list refs for {0}: {1}'.format(
                    remote, _strip_exc(exc))
            )

    if hard_reset:
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Hard reset to HEAD would be performed on {0}'.format(target)
            )
        __salt__['git.reset'](
            target,
            opts=['--hard', 'HEAD'],
            user=user,
            password=password,
            output_encoding=output_encoding)
        comments.append(
            'Repository was reset to HEAD before checking out revision'
        )

    # TODO: implement clean function for git module and add clean flag

    if checkout_commit_id == local_commit_id:
        new_rev = None
    else:
        if __opts__['test']:
            ret['changes']['HEAD'] = {'old': local_commit_id,
                                      'new': checkout_commit_id}
            return _neutral_test(
                ret,
                'Commit ID {0} would be checked out at {1}'.format(
                    checkout_commit_id,
                    target
                )
            )
        __salt__['git.checkout'](target,
                                 checkout_commit_id,
                                 force=force_checkout,
                                 user=user,
                                 password=password,
                                 output_encoding=output_encoding)
        comments.append(
            'Commit ID {0} was checked out at {1}'.format(
                checkout_commit_id,
                target
            )
        )

        try:
            new_rev = __salt__['git.revision'](
                cwd=target,
                user=user,
                password=password,
                ignore_retcode=True,
                output_encoding=output_encoding)
        except CommandExecutionError:
            new_rev = None

    if submodules:
        __salt__['git.submodule'](target,
                                  'update',
                                  opts=['--init', '--recursive'],
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  output_encoding=output_encoding)
        comments.append(
            'Submodules were updated'
        )

    if new_rev is not None:
        ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
    else:
        comments.append("Already checked out at correct revision")

    msg = _format_comments(comments)
    log.info(msg)
    ret['comment'] = msg

    return ret
.. versionadded:: 2016.3.0 Make sure a repository is cloned to the given target directory and is a detached HEAD checkout of the commit ID resolved from ``rev``. name Address of the remote repository. rev The branch, tag, or commit ID to checkout after clone. If a branch or tag is specified it will be resolved to a commit ID and checked out. target Name of the target directory where repository is about to be cloned. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_checkout : False When checking out the revision ID, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. fetch_remote : True If ``False`` a fetch will not be performed and only local refs will be reachable. hard_reset : False If ``True`` a hard reset will be performed before the checkout and any uncommitted modifications to the working directory will be discarded. Untracked files will remain in place. .. note:: Changes resulting from a hard reset will not trigger requisites. submodules : False Update submodules identity A path on the minion (or a SaltStack fileserver URL, e.g. ``salt://path/to/identity_file``) to a private key to use for SSH authentication. 
https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L2238-L2761
[ "def clean_kwargs(**kwargs):\n '''\n Return a dict without any of the __pub* keys (or any other keys starting\n with a dunder) from the kwargs dict passed into the execution module\n functions. These keys are useful for tracking what was used to invoke\n the function call, but they may not be desirable to have if passing the\n kwargs forward wholesale.\n\n Usage example:\n\n .. code-block:: python\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n '''\n ret = {}\n for key, val in six.iteritems(kwargs):\n if not key.startswith('__'):\n ret[key] = val\n return ret\n", "def invalid_kwargs(invalid_kwargs, raise_exc=True):\n '''\n Raise a SaltInvocationError if invalid_kwargs is non-empty\n '''\n if invalid_kwargs:\n if isinstance(invalid_kwargs, dict):\n new_invalid = [\n '{0}={1}'.format(x, y)\n for x, y in six.iteritems(invalid_kwargs)\n ]\n invalid_kwargs = new_invalid\n msg = (\n 'The following keyword arguments are not valid: {0}'\n .format(', '.join(invalid_kwargs))\n )\n if raise_exc:\n raise SaltInvocationError(msg)\n else:\n return msg\n", "def _fail(ret, msg, comments=None):\n ret['result'] = False\n if comments:\n msg += '\\n\\nChanges already made: ' + _format_comments(comments)\n ret['comment'] = msg\n return ret\n" ]
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) if submodules and remote_rev: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) msg = _format_comments(comments) log.info(msg) ret['comment'] = msg if new_rev is not None: ret['changes']['revision'] = {'old': None, 'new': new_rev} return ret def present(name, force=False, bare=True, template=None, separate_git_dir=None, shared=None, user=None, password=None, output_encoding=None): ''' Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. 
versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. 
_`worktree`: http://git-scm.com/docs/git-worktree ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # If the named directory is a git repo return True if os.path.isdir(name): if bare and os.path.isfile(os.path.join(name, 'HEAD')): return ret elif not bare and \ (os.path.isdir(os.path.join(name, '.git')) or __salt__['git.is_worktree'](name, user=user, password=password, output_encoding=output_encoding)): return ret # Directory exists and is not a git repo, if force is set destroy the # directory and recreate, otherwise throw an error elif force: # Directory exists, and the ``force`` option is enabled, so we need # to clear out its contents to proceed. if __opts__['test']: ret['changes']['new'] = name ret['changes']['forced init'] = True return _neutral_test( ret, 'Target directory {0} exists. Since force=True, the ' 'contents of {0} would be deleted, and a {1}repository ' 'would be initialized in its place.' .format(name, 'bare ' if bare else '') ) log.debug( 'Removing contents of %s to initialize %srepository in its ' 'place (force=True set in git.present state)', name, 'bare ' if bare else '' ) try: if os.path.islink(name): os.unlink(name) else: salt.utils.files.rm_rf(name) except OSError as exc: return _fail( ret, 'Unable to remove {0}: {1}'.format(name, exc) ) else: ret['changes']['forced init'] = True elif os.listdir(name): return _fail( ret, 'Target \'{0}\' exists, is non-empty, and is not a git ' 'repository. 
Set the \'force\' option to True to remove ' 'this directory\'s contents and proceed with initializing a ' 'repository'.format(name) ) # Run test is set if __opts__['test']: ret['changes']['new'] = name return _neutral_test( ret, 'New {0}repository would be created'.format( 'bare ' if bare else '' ) ) __salt__['git.init'](cwd=name, bare=bare, template=template, separate_git_dir=separate_git_dir, shared=shared, user=user, password=password, output_encoding=output_encoding) actions = [ 'Initialized {0}repository in {1}'.format( 'bare ' if bare else '', name ) ] if template: actions.append('Template directory set to {0}'.format(template)) if separate_git_dir: actions.append('Gitdir set to {0}'.format(separate_git_dir)) message = '. '.join(actions) if len(actions) > 1: message += '.' log.info(message) ret['changes']['new'] = name ret['comment'] = message return ret def cloned(name, target=None, branch=None, user=None, password=None, identity=None, https_user=None, https_pass=None, output_encoding=None): ''' .. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. If your use case requires that you keep the clone up to date with the remote repository, then consider using :py:func:`git.latest <salt.states.git.latest>`. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. 
This parameter will be ignored on non-Windows platforms. identity Path to a private key to use for ssh URLs. Works the same way as in :py:func:`git.latest <salt.states.git.latest>`, see that state's documentation for more information. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if target is None: ret['comment'] = '\'target\' argument is required' return ret elif not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): ret['comment'] = '\'target\' path must be absolute' return ret if branch is not None: if not isinstance(branch, six.string_types): branch = six.text_type(branch) if not branch: ret['comment'] = 'Invalid \'branch\' argument' return ret if not os.path.exists(target): need_clone = True else: try: __salt__['git.status'](target, user=user, password=password, output_encoding=output_encoding) except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: need_clone = False comments = [] def _clone_changes(ret): ret['changes']['new'] = name + ' => ' + target def _branch_changes(ret, old, new): ret['changes']['branch'] = {'old': old, 'new': new} if need_clone: if __opts__['test']: _clone_changes(ret) comment = '{0} would be cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) return _neutral_test(ret, comment) clone_opts = ['--branch', branch] if branch is not None else None try: __salt__['git.clone'](target, name, opts=clone_opts, user=user, password=password, identity=identity, 
https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) comments.append( '{0} cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) ) _clone_changes(ret) ret['comment'] = _format_comments(comments) ret['result'] = True return ret else: if branch is None: return _already_cloned(ret, target, branch, comments) else: current_branch = __salt__['git.current_branch']( target, user=user, password=password, output_encoding=output_encoding) if current_branch == branch: return _already_cloned(ret, target, branch, comments) else: if __opts__['test']: _branch_changes(ret, current_branch, branch) return _neutral_test( ret, 'Branch would be changed to \'{0}\''.format(branch)) try: __salt__['git.rev_parse']( target, rev=branch, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local head does not exist, so we need to check out a new # branch at the remote rev checkout_rev = '/'.join(('origin', branch)) checkout_opts = ['-b', branch] else: # Local head exists, so we just need to check it out checkout_rev = branch checkout_opts = None try: __salt__['git.checkout']( target, rev=checkout_rev, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc) return _fail(ret, msg, comments) else: comments.append('Branch changed to \'{0}\''.format(branch)) _branch_changes(ret, current_branch, branch) ret['comment'] = _format_comments(comments) ret['result'] = True return ret def config_unset(name, value_regex=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): r''' .. 
versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. 
code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. 
We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. 
for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. 
note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' 
to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} return True
saltstack/salt
salt/states/git.py
cloned
python
def cloned(name, target=None, branch=None, user=None, password=None, identity=None, https_user=None, https_pass=None, output_encoding=None): ''' .. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. If your use case requires that you keep the clone up to date with the remote repository, then consider using :py:func:`git.latest <salt.states.git.latest>`. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. identity Path to a private key to use for ssh URLs. Works the same way as in :py:func:`git.latest <salt.states.git.latest>`, see that state's documentation for more information. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. 
''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if target is None: ret['comment'] = '\'target\' argument is required' return ret elif not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): ret['comment'] = '\'target\' path must be absolute' return ret if branch is not None: if not isinstance(branch, six.string_types): branch = six.text_type(branch) if not branch: ret['comment'] = 'Invalid \'branch\' argument' return ret if not os.path.exists(target): need_clone = True else: try: __salt__['git.status'](target, user=user, password=password, output_encoding=output_encoding) except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: need_clone = False comments = [] def _clone_changes(ret): ret['changes']['new'] = name + ' => ' + target def _branch_changes(ret, old, new): ret['changes']['branch'] = {'old': old, 'new': new} if need_clone: if __opts__['test']: _clone_changes(ret) comment = '{0} would be cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) return _neutral_test(ret, comment) clone_opts = ['--branch', branch] if branch is not None else None try: __salt__['git.clone'](target, name, opts=clone_opts, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) comments.append( '{0} cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) ) _clone_changes(ret) ret['comment'] = _format_comments(comments) ret['result'] = True return ret else: if branch is None: return _already_cloned(ret, target, branch, comments) else: current_branch = __salt__['git.current_branch']( target, user=user, password=password, output_encoding=output_encoding) if current_branch == branch: return 
_already_cloned(ret, target, branch, comments) else: if __opts__['test']: _branch_changes(ret, current_branch, branch) return _neutral_test( ret, 'Branch would be changed to \'{0}\''.format(branch)) try: __salt__['git.rev_parse']( target, rev=branch, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local head does not exist, so we need to check out a new # branch at the remote rev checkout_rev = '/'.join(('origin', branch)) checkout_opts = ['-b', branch] else: # Local head exists, so we just need to check it out checkout_rev = branch checkout_opts = None try: __salt__['git.checkout']( target, rev=checkout_rev, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc) return _fail(ret, msg, comments) else: comments.append('Branch changed to \'{0}\''.format(branch)) _branch_changes(ret, current_branch, branch) ret['comment'] = _format_comments(comments) ret['result'] = True return ret
.. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. If your use case requires that you keep the clone up to date with the remote repository, then consider using :py:func:`git.latest <salt.states.git.latest>`. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. identity Path to a private key to use for ssh URLs. Works the same way as in :py:func:`git.latest <salt.states.git.latest>`, see that state's documentation for more information. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L2764-L2957
null
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
def present(name,
            force=False,
            bare=True,
            template=None,
            separate_git_dir=None,
            shared=None,
            user=None,
            password=None,
            output_encoding=None):
    '''
    Ensure that a git repository exists in the given directory

    .. warning::
        If the minion has Git 2.5 or later installed, ``name`` points to a
        worktree_, and ``force`` is set to ``True``, then the worktree will be
        deleted. This has been corrected in Salt 2015.8.0.

    name
        Path to the directory. Must be an absolute path.

    force : False
        If ``True``, and ``name`` points to an existing directory which does
        not contain a git repository, recursively remove that directory's
        contents and initialize a new repository in its place.

    bare : True
        If a repository must be initialized, make it a bare repository.

    template
        Alternate template directory to use when initializing.

    separate_git_dir
        Alternate ``$GIT_DIR`` to use when initializing.

    shared
        Sharing permissions for the repo (see `git-init(1)`_).

    user
        User under which to run git commands. Defaults to the user running
        the minion.

    password
        Windows only. Required when specifying ``user``; ignored elsewhere.

    output_encoding
        Encoding used to decode output from git commands. Rarely needed.

    .. _`git-init(1)`: http://git-scm.com/docs/git-init
    .. _`worktree`: http://git-scm.com/docs/git-worktree
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # Prefix reused in several user-facing messages ('bare repository' vs
    # 'repository').
    bare_prefix = 'bare ' if bare else ''

    if os.path.isdir(name):
        # If the target is already a repository of the requested flavor,
        # there is nothing to do.
        if bare and os.path.isfile(os.path.join(name, 'HEAD')):
            return ret
        if not bare \
                and (os.path.isdir(os.path.join(name, '.git'))
                     or __salt__['git.is_worktree'](name,
                                                    user=user,
                                                    password=password,
                                                    output_encoding=output_encoding)):
            return ret

        # Directory exists but is not a git repo. With force=True we wipe it
        # and re-initialize; otherwise a non-empty directory is an error.
        if force:
            if __opts__['test']:
                ret['changes']['new'] = name
                ret['changes']['forced init'] = True
                return _neutral_test(
                    ret,
                    'Target directory {0} exists. Since force=True, the '
                    'contents of {0} would be deleted, and a {1}repository '
                    'would be initialized in its place.'
                    .format(name, bare_prefix)
                )
            log.debug(
                'Removing contents of %s to initialize %srepository in its '
                'place (force=True set in git.present state)',
                name, bare_prefix
            )
            try:
                # A symlink is unlinked rather than recursed into, so that we
                # never delete files outside of ``name``.
                if os.path.islink(name):
                    os.unlink(name)
                else:
                    salt.utils.files.rm_rf(name)
            except OSError as exc:
                return _fail(
                    ret,
                    'Unable to remove {0}: {1}'.format(name, exc)
                )
            ret['changes']['forced init'] = True
        elif os.listdir(name):
            return _fail(
                ret,
                'Target \'{0}\' exists, is non-empty, and is not a git '
                'repository. Set the \'force\' option to True to remove '
                'this directory\'s contents and proceed with initializing a '
                'repository'.format(name)
            )

    # At this point a repository needs to be initialized.
    if __opts__['test']:
        ret['changes']['new'] = name
        return _neutral_test(
            ret,
            'New {0}repository would be created'.format(bare_prefix)
        )

    __salt__['git.init'](cwd=name,
                         bare=bare,
                         template=template,
                         separate_git_dir=separate_git_dir,
                         shared=shared,
                         user=user,
                         password=password,
                         output_encoding=output_encoding)

    performed = [
        'Initialized {0}repository in {1}'.format(bare_prefix, name)
    ]
    if template:
        performed.append('Template directory set to {0}'.format(template))
    if separate_git_dir:
        performed.append('Gitdir set to {0}'.format(separate_git_dir))

    message = '. '.join(performed)
    if len(performed) > 1:
        # Joining with '. ' leaves the final action without a period.
        message += '.'
    log.info(message)

    ret['changes']['new'] = name
    ret['comment'] = message
    return ret
def detached(name,
             rev,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=None,
             unless=None,
             output_encoding=None,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``rev``.

    name
        Address of the remote repository.

    rev
        The branch, tag, or commit ID to checkout after clone. A branch or
        tag is resolved to a commit ID and checked out.

    target
        Name of the target directory where repository is about to be cloned.
        Must be an absolute path.

    remote : origin
        Git remote to use. Added (or updated) if not already present with the
        correct fetch URL.

    user
        User under which to run git commands. Defaults to the user running
        the minion.

    password
        Windows only. Required when specifying ``user``; ignored elsewhere.

        .. versionadded:: 2016.3.4

    force_clone : False
        If ``target`` exists and is not a git repository, remove its contents
        and clone into it instead of failing.

    force_checkout : False
        Discard unwritten changes when checking out instead of failing.

    fetch_remote : True
        If ``False`` no fetch is performed and only local refs are reachable.

    hard_reset : False
        Perform a hard reset before the checkout, discarding uncommitted
        modifications (untracked files remain in place).

    submodules : False
        Update submodules

    identity
        Path (or ``salt://`` URL) to a private key for SSH authentication.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    onlyif
        Run the state only if this command returns true

    unless
        Run the state only if this command returns false

    output_encoding
        Encoding used to decode output from git commands. Rarely needed.

        .. versionadded:: 2018.3.1
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not rev:
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev)
        )

    if not target:
        # Bugfix: this message previously interpolated ``rev`` instead of
        # ``target``, producing a misleading error.
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'target\' argument'.format(target)
        )

    # Ensure that certain arguments are strings to ensure that comparisons
    # against remote refs and paths work as expected.
    if not isinstance(rev, six.string_types):
        rev = six.text_type(rev)
    if target is not None:
        if not isinstance(target, six.string_types):
            target = six.text_type(target)
        if not os.path.isabs(target):
            return _fail(
                ret,
                'Target \'{0}\' is not an absolute path'.format(target)
            )
    if user is not None and not isinstance(user, six.string_types):
        user = six.text_type(user)
    if remote is not None and not isinstance(remote, six.string_types):
        remote = six.text_type(remote)
    if identity is not None:
        if isinstance(identity, six.string_types):
            identity = [identity]
        elif not isinstance(identity, list):
            return _fail(ret, 'Identity must be either a list or a string')

        identity = [os.path.expanduser(x) for x in identity]
        for ident_path in identity:
            if 'salt://' in ident_path:
                try:
                    # NOTE(review): the cached path is bound to the loop
                    # variable only and never written back into ``identity``;
                    # presumably later git calls re-resolve the salt:// URL —
                    # confirm before relying on the cached copy here.
                    ident_path = __salt__['cp.cache_file'](ident_path)
                except IOError as exc:
                    log.error('Failed to cache %s: %s', ident_path, exc)
                    return _fail(
                        ret,
                        'Identity \'{0}\' does not exist.'.format(
                            ident_path
                        )
                    )
            if not os.path.isabs(ident_path):
                return _fail(
                    ret,
                    'Identity \'{0}\' is not an absolute path'.format(
                        ident_path
                    )
                )
    if https_user is not None and not isinstance(https_user, six.string_types):
        https_user = six.text_type(https_user)
    if https_pass is not None and not isinstance(https_pass, six.string_types):
        https_pass = six.text_type(https_pass)

    if os.path.isfile(target):
        return _fail(
            ret,
            'Target \'{0}\' exists and is a regular file, cannot proceed'
            .format(target)
        )

    try:
        desired_fetch_url = salt.utils.url.add_http_basic_auth(
            name,
            https_user,
            https_pass,
            https_only=True
        )
    except ValueError as exc:
        return _fail(ret, exc.__str__())

    redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url)

    # Check if onlyif or unless conditions match
    run_check_cmd_kwargs = {'runas': user}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    cret = mod_run_check(
        run_check_cmd_kwargs, onlyif, unless
    )
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    # Determine if supplied ref is a hash. Bugfix: the comparisons below used
    # ``is 'hash'`` (identity against a str literal), which only works due to
    # CPython string interning and raises SyntaxWarning on Python 3.8+; they
    # now use equality.
    remote_rev_type = 'ref'
    if len(rev) <= 40 \
            and all(x in string.hexdigits for x in rev):
        rev = rev.lower()
        remote_rev_type = 'hash'

    comments = []
    hash_exists_locally = False
    local_commit_id = None

    gitdir = os.path.join(target, '.git')
    if os.path.isdir(gitdir) \
            or __salt__['git.is_worktree'](target,
                                           user=user,
                                           password=password,
                                           output_encoding=output_encoding):
        # Target directory is a git repository or git worktree

        local_commit_id = _get_local_rev_and_branch(
            target,
            user,
            password,
            output_encoding=output_encoding)[0]

        if remote_rev_type == 'hash':
            try:
                __salt__['git.describe'](target,
                                         rev,
                                         user=user,
                                         password=password,
                                         ignore_retcode=True,
                                         output_encoding=output_encoding)
            except CommandExecutionError:
                hash_exists_locally = False
            else:
                # The rev is a hash and it exists locally so skip to checkout
                hash_exists_locally = True
        else:
            # Check that remote is present and set to correct url
            remotes = __salt__['git.remotes'](target,
                                              user=user,
                                              password=password,
                                              redact_auth=False,
                                              output_encoding=output_encoding)

            if remote in remotes and name in remotes[remote]['fetch']:
                pass
            else:
                # The fetch_url for the desired remote does not match the
                # specified URL (or the remote does not exist), so set the
                # remote URL.
                current_fetch_url = None
                if remote in remotes:
                    current_fetch_url = remotes[remote]['fetch']

                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Remote {0} would be set to {1}'.format(
                            remote, name
                        )
                    )

                __salt__['git.remote_set'](target,
                                           url=name,
                                           remote=remote,
                                           user=user,
                                           password=password,
                                           https_user=https_user,
                                           https_pass=https_pass,
                                           output_encoding=output_encoding)
                comments.append(
                    'Remote {0} updated from \'{1}\' to \'{2}\''.format(
                        remote,
                        current_fetch_url,
                        name
                    )
                )

    else:
        # Clone repository
        if os.path.isdir(target):
            target_contents = os.listdir(target)
            if force_clone:
                # Clone is required, and target directory exists, but the
                # ``force`` option is enabled, so we need to clear out its
                # contents to proceed.
                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Target directory {0} exists. Since force_clone=True, '
                        'the contents of {0} would be deleted, and {1} would '
                        'be cloned into this directory.'.format(target, name)
                    )
                log.debug(
                    'Removing contents of %s to clone repository %s in its '
                    'place (force_clone=True set in git.detached state)',
                    target, name
                )
                removal_errors = {}
                for target_object in target_contents:
                    target_path = os.path.join(target, target_object)
                    try:
                        salt.utils.files.rm_rf(target_path)
                    except OSError as exc:
                        # A path that vanished on its own is not an error.
                        if exc.errno != errno.ENOENT:
                            removal_errors[target_path] = exc
                if removal_errors:
                    err_strings = [
                        '  {0}\n    {1}'.format(k, v)
                        for k, v in six.iteritems(removal_errors)
                    ]
                    return _fail(
                        ret,
                        'Unable to remove\n{0}'.format('\n'.join(err_strings)),
                        comments
                    )
                ret['changes']['forced clone'] = True
            elif target_contents:
                # Clone is required, but target dir exists and is non-empty.
                # We can't proceed.
                return _fail(
                    ret,
                    'Target \'{0}\' exists, is non-empty and is not a git '
                    'repository. Set the \'force_clone\' option to True to '
                    'remove this directory\'s contents and proceed with '
                    'cloning the remote repository'.format(target)
                )

        log.debug('Target %s is not found, \'git clone\' is required', target)
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository {0} would be cloned to {1}'.format(
                    name, target
                )
            )
        try:
            # --no-checkout: the working tree is populated later by the
            # explicit detached checkout below.
            clone_opts = ['--no-checkout']
            if remote != 'origin':
                clone_opts.extend(['--origin', remote])

            __salt__['git.clone'](target,
                                  name,
                                  user=user,
                                  password=password,
                                  opts=clone_opts,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  saltenv=__env__,
                                  output_encoding=output_encoding)
            comments.append('{0} cloned to {1}'.format(name, target))

        except Exception as exc:
            log.error(
                'Unexpected exception in git.detached state',
                exc_info=True
            )
            if isinstance(exc, CommandExecutionError):
                msg = _strip_exc(exc)
            else:
                msg = six.text_type(exc)
            return _fail(ret, msg, comments)

    # Repository exists and is ready for fetch/checkout
    refspecs = [
        'refs/heads/*:refs/remotes/{0}/*'.format(remote),
        '+refs/tags/*:refs/tags/*'
    ]
    if hash_exists_locally or fetch_remote is False:
        # Nothing to fetch: either the requested hash is already present
        # locally, or the caller disabled fetching.
        pass
    else:
        # Fetch refs from remote
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository remote {0} would be fetched'.format(remote)
            )
        try:
            fetch_changes = __salt__['git.fetch'](
                target,
                remote=remote,
                force=True,
                refspecs=refspecs,
                user=user,
                password=password,
                identity=identity,
                saltenv=__env__,
                output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Fetch failed'
            msg += ':\n\n' + six.text_type(exc)
            return _fail(ret, msg, comments)
        else:
            if fetch_changes:
                comments.append(
                    'Remote {0} was fetched, resulting in updated '
                    'refs'.format(remote)
                )

    # Resolve the rev to a commit ID for the checkout.
    checkout_commit_id = ''
    if remote_rev_type == 'hash':
        if __salt__['git.describe'](
                target,
                rev,
                user=user,
                password=password,
                output_encoding=output_encoding):
            checkout_commit_id = rev
        else:
            return _fail(
                ret,
                'Revision \'{0}\' does not exist'.format(rev)
            )
    else:
        try:
            all_remote_refs = __salt__['git.remote_refs'](
                target,
                user=user,
                password=password,
                identity=identity,
                https_user=https_user,
                https_pass=https_pass,
                ignore_retcode=False,
                output_encoding=output_encoding)

            # Branches take precedence over tags of the same name.
            if 'refs/remotes/' + remote + '/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev]
            elif 'refs/tags/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/tags/' + rev]
            else:
                return _fail(
                    ret,
                    'Revision \'{0}\' does not exist'.format(rev)
                )

        except CommandExecutionError as exc:
            return _fail(
                ret,
                'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc))
            )

    if hard_reset:
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Hard reset to HEAD would be performed on {0}'.format(target)
            )
        __salt__['git.reset'](
            target,
            opts=['--hard', 'HEAD'],
            user=user,
            password=password,
            output_encoding=output_encoding)
        comments.append(
            'Repository was reset to HEAD before checking out revision'
        )

    # TODO: implement clean function for git module and add clean flag

    if checkout_commit_id == local_commit_id:
        # Already at the desired commit; nothing to check out.
        new_rev = None
    else:
        if __opts__['test']:
            ret['changes']['HEAD'] = {'old': local_commit_id,
                                      'new': checkout_commit_id}
            return _neutral_test(
                ret,
                'Commit ID {0} would be checked out at {1}'.format(
                    checkout_commit_id,
                    target
                )
            )
        __salt__['git.checkout'](target,
                                 checkout_commit_id,
                                 force=force_checkout,
                                 user=user,
                                 password=password,
                                 output_encoding=output_encoding)
        comments.append(
            'Commit ID {0} was checked out at {1}'.format(
                checkout_commit_id,
                target
            )
        )

        try:
            new_rev = __salt__['git.revision'](
                cwd=target,
                user=user,
                password=password,
                ignore_retcode=True,
                output_encoding=output_encoding)
        except CommandExecutionError:
            new_rev = None

    if submodules:
        __salt__['git.submodule'](target,
                                  'update',
                                  opts=['--init', '--recursive'],
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  output_encoding=output_encoding)
        comments.append(
            'Submodules were updated'
        )

    if new_rev is not None:
        ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
    else:
        comments.append("Already checked out at correct revision")

    msg = _format_comments(comments)
    log.info(msg)
    ret['comment'] = msg

    return ret
code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. 
We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. 
for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. 
note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' 
to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} return True
saltstack/salt
salt/states/git.py
config_unset
python
def config_unset(name,
                 value_regex=None,
                 repo=None,
                 user=None,
                 password=None,
                 output_encoding=None,
                 **kwargs):
    r'''
    .. versionadded:: 2015.8.0

    Ensure that the named config key is not present

    name
        The name of the configuration key to unset. This value can be a regex,
        but the regex must match the entire key name. For example, ``foo\.``
        would not match all keys in the ``foo`` section, it would be necessary
        to use ``foo\..+`` to do so.

    value_regex
        Regex indicating the values to unset for the matching key(s)

        .. note::
            This option behaves differently depending on whether or not ``all``
            is set to ``True``. If it is, then all values matching the regex
            will be deleted (this is the only way to delete multiple values
            from a multivar). If ``all`` is set to ``False``, then this state
            will fail if the regex matches more than one value in a multivar.

    all : False
        If ``True``, unset all matches

    repo
        Location of the git repository for which the config value should be
        set. Required unless ``global`` is set to ``True``.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    global : False
        If ``True``, this will set a global git config option

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    **Examples:**

    .. code-block:: yaml

        # Value matching 'baz'
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - value_regex: 'baz'
            - repo: /path/to/repo

        # Ensure entire multivar is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - all: True

        # Ensure all variables in 'foo' section are unset, including multivars
        mylocalrepo:
          git.config_unset:
            - name: 'foo\..+'
            - all: True

        # Ensure that global config value is unset
        mylocalrepo:
          git.config_unset:
            - name: foo.bar
            - global: True
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No matching keys are set'}

    # Sanitize kwargs and make sure that no invalid ones were passed. This
    # allows us to accept 'global' as an argument to this function without
    # shadowing global(), while also not allowing unwanted arguments to be
    # passed.
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    all_ = kwargs.pop('all', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            'Non-global config options require the \'repo\' argument to be '
            'set'
        )

    # Normalize name/value_regex to text so they can be safely formatted and
    # passed to the git execution module.
    if not isinstance(name, six.string_types):
        name = six.text_type(name)
    if value_regex is not None:
        if not isinstance(value_regex, six.string_types):
            value_regex = six.text_type(value_regex)

    # Ensure that the key regex matches the full key name
    key = '^' + name.lstrip('^').rstrip('$') + '$'

    # Get matching keys/values
    pre_matches = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=value_regex,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    if not pre_matches:
        # No changes need to be made
        return ret

    # Perform sanity check on the matches. We can't proceed if the value_regex
    # matches more than one value in a given key, and 'all' is not set to True
    if not all_:
        greedy_matches = ['{0} ({1})'.format(x, ', '.join(y))
                          for x, y in six.iteritems(pre_matches)
                          if len(y) > 1]
        if greedy_matches:
            if value_regex is not None:
                return _fail(
                    ret,
                    'Multiple values are matched by value_regex for the '
                    'following keys (set \'all\' to True to force removal): '
                    '{0}'.format('; '.join(greedy_matches))
                )
            else:
                return _fail(
                    ret,
                    'Multivar(s) matched by the key expression (set \'all\' '
                    'to True to force removal): {0}'.format(
                        '; '.join(greedy_matches)
                    )
                )

    if __opts__['test']:
        ret['changes'] = pre_matches
        return _neutral_test(
            ret,
            '{0} key(s) would have value(s) unset'.format(len(pre_matches))
        )

    if value_regex is None:
        pre = pre_matches
    else:
        # Get all keys matching the key expression, so we can accurately report
        # on changes made.
        pre = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=None,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding,
            **{'global': global_}
        )

    failed = []
    # Unset the specified value(s). There is no unset for regexes so loop
    # through the pre_matches dict and unset each matching key individually.
    # NOTE(review): 'key=name' (the original regex-ish name) is passed on
    # every iteration rather than 'key=key_name', so the same unset call is
    # repeated once per matched key. Preserved as-is; worth confirming
    # upstream whether 'key=key_name' was intended.
    for key_name in pre_matches:
        try:
            __salt__['git.config_unset'](
                cwd=repo,
                key=name,
                value_regex=value_regex,
                all=all_,
                user=user,
                password=password,
                output_encoding=output_encoding,
                **{'global': global_}
            )
        except CommandExecutionError as exc:
            msg = 'Failed to unset \'{0}\''.format(key_name)
            if value_regex is not None:
                # Bugfix: this previously appended a literal '{1}'
                # placeholder that was never substituted; interpolate the
                # actual value_regex into the log message.
                msg += ' using value_regex \'{0}\''.format(value_regex)
            msg += ': ' + _strip_exc(exc)
            log.error(msg)
            failed.append(key_name)

    if failed:
        return _fail(
            ret,
            'Error(s) occurred unsetting values for the following keys (see '
            'the minion log for details): {0}'.format(', '.join(failed))
        )

    post = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=None,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    for key_name in pre:
        if key_name not in post:
            # Entire key was removed; record all of its former values as the
            # change and skip the per-value diff below. Bugfix: previously
            # fell through to post[key_name], which raised KeyError whenever
            # a key was removed outright.
            ret['changes'][key_name] = pre[key_name]
            continue
        unset = [x for x in pre[key_name] if x not in post[key_name]]
        if unset:
            ret['changes'][key_name] = unset

    if value_regex is None:
        post_matches = post
    else:
        post_matches = __salt__['git.config_get_regexp'](
            cwd=repo,
            key=key,
            value_regex=value_regex,
            user=user,
            password=password,
            ignore_retcode=True,
            output_encoding=output_encoding,
            **{'global': global_}
        )

    if post_matches:
        failed = ['{0} ({1})'.format(x, ', '.join(y))
                  for x, y in six.iteritems(post_matches)]
        return _fail(
            ret,
            'Failed to unset value(s): {0}'.format('; '.join(failed))
        )

    ret['comment'] = 'Value(s) successfully unset'
    return ret
r''' .. versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. 
code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L2960-L3217
[ "def clean_kwargs(**kwargs):\n '''\n Return a dict without any of the __pub* keys (or any other keys starting\n with a dunder) from the kwargs dict passed into the execution module\n functions. These keys are useful for tracking what was used to invoke\n the function call, but they may not be desirable to have if passing the\n kwargs forward wholesale.\n\n Usage example:\n\n .. code-block:: python\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n '''\n ret = {}\n for key, val in six.iteritems(kwargs):\n if not key.startswith('__'):\n ret[key] = val\n return ret\n", "def invalid_kwargs(invalid_kwargs, raise_exc=True):\n '''\n Raise a SaltInvocationError if invalid_kwargs is non-empty\n '''\n if invalid_kwargs:\n if isinstance(invalid_kwargs, dict):\n new_invalid = [\n '{0}={1}'.format(x, y)\n for x, y in six.iteritems(invalid_kwargs)\n ]\n invalid_kwargs = new_invalid\n msg = (\n 'The following keyword arguments are not valid: {0}'\n .format(', '.join(invalid_kwargs))\n )\n if raise_exc:\n raise SaltInvocationError(msg)\n else:\n return msg\n", "def _strip_exc(exc):\n '''\n Strip the actual command that was run from exc.strerror to leave just the\n error message\n '''\n return re.sub(r'^Command [\\'\"].+[\\'\"] failed: ', '', exc.strerror)\n", "def _neutral_test(ret, comment):\n ret['result'] = None\n ret['comment'] = comment\n return ret\n", "def _fail(ret, msg, comments=None):\n ret['result'] = False\n if comments:\n msg += '\\n\\nChanges already made: ' + _format_comments(comments)\n ret['comment'] = msg\n return ret\n" ]
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) if submodules and remote_rev: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) msg = _format_comments(comments) log.info(msg) ret['comment'] = msg if new_rev is not None: ret['changes']['revision'] = {'old': None, 'new': new_rev} return ret def present(name, force=False, bare=True, template=None, separate_git_dir=None, shared=None, user=None, password=None, output_encoding=None): ''' Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. 
versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. 
_`worktree`: http://git-scm.com/docs/git-worktree ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # If the named directory is a git repo return True if os.path.isdir(name): if bare and os.path.isfile(os.path.join(name, 'HEAD')): return ret elif not bare and \ (os.path.isdir(os.path.join(name, '.git')) or __salt__['git.is_worktree'](name, user=user, password=password, output_encoding=output_encoding)): return ret # Directory exists and is not a git repo, if force is set destroy the # directory and recreate, otherwise throw an error elif force: # Directory exists, and the ``force`` option is enabled, so we need # to clear out its contents to proceed. if __opts__['test']: ret['changes']['new'] = name ret['changes']['forced init'] = True return _neutral_test( ret, 'Target directory {0} exists. Since force=True, the ' 'contents of {0} would be deleted, and a {1}repository ' 'would be initialized in its place.' .format(name, 'bare ' if bare else '') ) log.debug( 'Removing contents of %s to initialize %srepository in its ' 'place (force=True set in git.present state)', name, 'bare ' if bare else '' ) try: if os.path.islink(name): os.unlink(name) else: salt.utils.files.rm_rf(name) except OSError as exc: return _fail( ret, 'Unable to remove {0}: {1}'.format(name, exc) ) else: ret['changes']['forced init'] = True elif os.listdir(name): return _fail( ret, 'Target \'{0}\' exists, is non-empty, and is not a git ' 'repository. 
def detached(name,
             rev,
             target=None,
             remote='origin',
             user=None,
             password=None,
             force_clone=False,
             force_checkout=False,
             fetch_remote=True,
             hard_reset=False,
             submodules=False,
             identity=None,
             https_user=None,
             https_pass=None,
             onlyif=None,
             unless=None,
             output_encoding=None,
             **kwargs):
    '''
    .. versionadded:: 2016.3.0

    Make sure a repository is cloned to the given target directory and is
    a detached HEAD checkout of the commit ID resolved from ``rev``.

    name
        Address of the remote repository.

    rev
        The branch, tag, or commit ID to checkout after clone. If a branch or
        tag is specified it will be resolved to a commit ID and checked out.

    target
        Name of the target directory where repository is about to be cloned.

    remote : origin
        Git remote to use. If this state needs to clone the repo, it will clone
        it using this value as the initial remote name. If the repository
        already exists, and a remote by this name is not present, one will be
        added.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    force_clone : False
        If the ``target`` directory exists and is not a git repository, then
        this state will fail. Set this argument to ``True`` to remove the
        contents of the target directory and clone the repo into it.

    force_checkout : False
        When checking out the revision ID, the state will fail if there are
        unwritten changes. Set this argument to ``True`` to discard unwritten
        changes when checking out.

    fetch_remote : True
        If ``False`` a fetch will not be performed and only local refs
        will be reachable.

    hard_reset : False
        If ``True`` a hard reset will be performed before the checkout and any
        uncommitted modifications to the working directory will be discarded.
        Untracked files will remain in place.

        .. note::
            Changes resulting from a hard reset will not trigger requisites.

    submodules : False
        Update submodules

    identity
        A path on the minion (or a SaltStack fileserver URL, e.g.
        ``salt://path/to/identity_file``) to a private key to use for SSH
        authentication.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true

    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not rev:
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev)
        )

    if not target:
        # Bugfix: this message previously interpolated ``rev`` instead of
        # ``target``, producing a misleading error.
        return _fail(
            ret,
            '\'{0}\' is not a valid value for the \'target\' '
            'argument'.format(target)
        )

    # Ensure that certain arguments are strings to ensure that comparisons work
    if not isinstance(rev, six.string_types):
        rev = six.text_type(rev)

    if target is not None:
        if not isinstance(target, six.string_types):
            target = six.text_type(target)
        if not os.path.isabs(target):
            return _fail(
                ret,
                'Target \'{0}\' is not an absolute path'.format(target)
            )
    if user is not None and not isinstance(user, six.string_types):
        user = six.text_type(user)
    if remote is not None and not isinstance(remote, six.string_types):
        remote = six.text_type(remote)
    if identity is not None:
        if isinstance(identity, six.string_types):
            identity = [identity]
        elif not isinstance(identity, list):
            return _fail(ret, 'Identity must be either a list or a string')
        identity = [os.path.expanduser(x) for x in identity]
        for ident_path in identity:
            if 'salt://' in ident_path:
                # Cache fileserver-hosted keys locally before use
                try:
                    ident_path = __salt__['cp.cache_file'](ident_path)
                except IOError as exc:
                    log.error('Failed to cache %s: %s', ident_path, exc)
                    return _fail(
                        ret,
                        'Identity \'{0}\' does not exist.'.format(
                            ident_path
                        )
                    )
            if not os.path.isabs(ident_path):
                return _fail(
                    ret,
                    'Identity \'{0}\' is not an absolute path'.format(
                        ident_path
                    )
                )
    if https_user is not None and not isinstance(https_user, six.string_types):
        https_user = six.text_type(https_user)
    if https_pass is not None and not isinstance(https_pass, six.string_types):
        https_pass = six.text_type(https_pass)

    if os.path.isfile(target):
        return _fail(
            ret,
            'Target \'{0}\' exists and is a regular file, cannot proceed'
            .format(target)
        )

    # Validate the URL/credential combination; the composed URL itself is not
    # needed past this point. (An unused ``redacted_fetch_url`` local was
    # removed here.)
    try:
        salt.utils.url.add_http_basic_auth(name,
                                           https_user,
                                           https_pass,
                                           https_only=True)
    except ValueError as exc:
        return _fail(ret, exc.__str__())

    # Check if onlyif or unless conditions match
    run_check_cmd_kwargs = {'runas': user}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    cret = mod_run_check(
        run_check_cmd_kwargs, onlyif, unless
    )
    if isinstance(cret, dict):
        ret.update(cret)
        return ret

    # Determine if supplied ref is a hash. Note: use ``==`` rather than
    # ``is`` for string comparison; identity comparison against a literal
    # relies on CPython interning and warns on modern interpreters.
    remote_rev_type = 'ref'
    if len(rev) <= 40 \
            and all(x in string.hexdigits for x in rev):
        rev = rev.lower()
        remote_rev_type = 'hash'

    comments = []
    hash_exists_locally = False
    local_commit_id = None

    gitdir = os.path.join(target, '.git')
    if os.path.isdir(gitdir) \
            or __salt__['git.is_worktree'](target,
                                           user=user,
                                           password=password,
                                           output_encoding=output_encoding):
        # Target directory is a git repository or git worktree

        local_commit_id = _get_local_rev_and_branch(
            target,
            user,
            password,
            output_encoding=output_encoding)[0]

        if remote_rev_type == 'hash':
            try:
                __salt__['git.describe'](target,
                                         rev,
                                         user=user,
                                         password=password,
                                         ignore_retcode=True,
                                         output_encoding=output_encoding)
            except CommandExecutionError:
                hash_exists_locally = False
            else:
                # The rev is a hash and it exists locally so skip to checkout
                hash_exists_locally = True
        else:
            # Check that remote is present and set to correct url
            remotes = __salt__['git.remotes'](target,
                                              user=user,
                                              password=password,
                                              redact_auth=False,
                                              output_encoding=output_encoding)

            if remote in remotes and name in remotes[remote]['fetch']:
                pass
            else:
                # The fetch_url for the desired remote does not match the
                # specified URL (or the remote does not exist), so set the
                # remote URL.
                current_fetch_url = None
                if remote in remotes:
                    current_fetch_url = remotes[remote]['fetch']

                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Remote {0} would be set to {1}'.format(
                            remote, name
                        )
                    )

                __salt__['git.remote_set'](target,
                                           url=name,
                                           remote=remote,
                                           user=user,
                                           password=password,
                                           https_user=https_user,
                                           https_pass=https_pass,
                                           output_encoding=output_encoding)
                comments.append(
                    'Remote {0} updated from \'{1}\' to \'{2}\''.format(
                        remote,
                        current_fetch_url,
                        name
                    )
                )

    else:
        # Clone repository
        if os.path.isdir(target):
            target_contents = os.listdir(target)
            if force_clone:
                # Clone is required, and target directory exists, but the
                # ``force`` option is enabled, so we need to clear out its
                # contents to proceed.
                if __opts__['test']:
                    return _neutral_test(
                        ret,
                        'Target directory {0} exists. Since force_clone=True, '
                        'the contents of {0} would be deleted, and {1} would '
                        'be cloned into this directory.'.format(target, name)
                    )
                log.debug(
                    'Removing contents of %s to clone repository %s in its '
                    'place (force_clone=True set in git.detached state)',
                    target, name
                )
                removal_errors = {}
                for target_object in target_contents:
                    target_path = os.path.join(target, target_object)
                    try:
                        salt.utils.files.rm_rf(target_path)
                    except OSError as exc:
                        # Already-gone entries are fine; record anything else
                        if exc.errno != errno.ENOENT:
                            removal_errors[target_path] = exc
                if removal_errors:
                    err_strings = [
                        '  {0}\n    {1}'.format(k, v)
                        for k, v in six.iteritems(removal_errors)
                    ]
                    return _fail(
                        ret,
                        'Unable to remove\n{0}'.format('\n'.join(err_strings)),
                        comments
                    )
                ret['changes']['forced clone'] = True
            elif target_contents:
                # Clone is required, but target dir exists and is non-empty. We
                # can't proceed.
                return _fail(
                    ret,
                    'Target \'{0}\' exists, is non-empty and is not a git '
                    'repository. Set the \'force_clone\' option to True to '
                    'remove this directory\'s contents and proceed with '
                    'cloning the remote repository'.format(target)
                )

        log.debug('Target %s is not found, \'git clone\' is required', target)
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository {0} would be cloned to {1}'.format(
                    name, target
                )
            )
        try:
            # --no-checkout: we resolve and check out the exact commit below
            clone_opts = ['--no-checkout']
            if remote != 'origin':
                clone_opts.extend(['--origin', remote])

            __salt__['git.clone'](target,
                                  name,
                                  user=user,
                                  password=password,
                                  opts=clone_opts,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  saltenv=__env__,
                                  output_encoding=output_encoding)
            comments.append('{0} cloned to {1}'.format(name, target))

        except Exception as exc:
            log.error(
                'Unexpected exception in git.detached state',
                exc_info=True
            )
            if isinstance(exc, CommandExecutionError):
                msg = _strip_exc(exc)
            else:
                msg = six.text_type(exc)
            return _fail(ret, msg, comments)

    # Repository exists and is ready for fetch/checkout
    refspecs = [
        'refs/heads/*:refs/remotes/{0}/*'.format(remote),
        '+refs/tags/*:refs/tags/*'
    ]
    if hash_exists_locally or fetch_remote is False:
        # Either the hash is already present or fetching was disabled
        pass
    else:
        # Fetch refs from remote
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Repository remote {0} would be fetched'.format(remote)
            )
        try:
            fetch_changes = __salt__['git.fetch'](
                target,
                remote=remote,
                force=True,
                refspecs=refspecs,
                user=user,
                password=password,
                identity=identity,
                saltenv=__env__,
                output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Fetch failed'
            msg += ':\n\n' + six.text_type(exc)
            return _fail(ret, msg, comments)
        else:
            if fetch_changes:
                comments.append(
                    'Remote {0} was fetched, resulting in updated '
                    'refs'.format(remote)
                )

    # Resolve ``rev`` to the commit ID that should be checked out
    checkout_commit_id = ''
    if remote_rev_type == 'hash':
        if __salt__['git.describe'](
                target,
                rev,
                user=user,
                password=password,
                output_encoding=output_encoding):
            checkout_commit_id = rev
        else:
            return _fail(
                ret,
                'Revision \'{0}\' does not exist'.format(rev)
            )
    else:
        try:
            all_remote_refs = __salt__['git.remote_refs'](
                target,
                user=user,
                password=password,
                identity=identity,
                https_user=https_user,
                https_pass=https_pass,
                ignore_retcode=False,
                output_encoding=output_encoding)

            # Branch refs take precedence over tags of the same name
            if 'refs/remotes/' + remote + '/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs[
                    'refs/remotes/' + remote + '/' + rev]
            elif 'refs/tags/' + rev in all_remote_refs:
                checkout_commit_id = all_remote_refs['refs/tags/' + rev]
            else:
                return _fail(
                    ret,
                    'Revision \'{0}\' does not exist'.format(rev)
                )

        except CommandExecutionError as exc:
            return _fail(
                ret,
                'Failed to list refs for {0}: {1}'.format(
                    remote, _strip_exc(exc))
            )

    if hard_reset:
        if __opts__['test']:
            return _neutral_test(
                ret,
                'Hard reset to HEAD would be performed on {0}'.format(target)
            )
        __salt__['git.reset'](
            target,
            opts=['--hard', 'HEAD'],
            user=user,
            password=password,
            output_encoding=output_encoding)
        comments.append(
            'Repository was reset to HEAD before checking out revision'
        )

    # TODO: implement clean function for git module and add clean flag

    if checkout_commit_id == local_commit_id:
        # Already at the desired commit; nothing to check out
        new_rev = None
    else:
        if __opts__['test']:
            ret['changes']['HEAD'] = {'old': local_commit_id,
                                      'new': checkout_commit_id}
            return _neutral_test(
                ret,
                'Commit ID {0} would be checked out at {1}'.format(
                    checkout_commit_id,
                    target
                )
            )
        __salt__['git.checkout'](target,
                                 checkout_commit_id,
                                 force=force_checkout,
                                 user=user,
                                 password=password,
                                 output_encoding=output_encoding)
        comments.append(
            'Commit ID {0} was checked out at {1}'.format(
                checkout_commit_id,
                target
            )
        )

        try:
            new_rev = __salt__['git.revision'](
                cwd=target,
                user=user,
                password=password,
                ignore_retcode=True,
                output_encoding=output_encoding)
        except CommandExecutionError:
            new_rev = None

    if submodules:
        __salt__['git.submodule'](target,
                                  'update',
                                  opts=['--init', '--recursive'],
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  output_encoding=output_encoding)
        comments.append(
            'Submodules were updated'
        )

    if new_rev is not None:
        ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
    else:
        comments.append("Already checked out at correct revision")

    msg = _format_comments(comments)
    log.info(msg)
    ret['comment'] = msg

    return ret
def cloned(name,
           target=None,
           branch=None,
           user=None,
           password=None,
           identity=None,
           https_user=None,
           https_pass=None,
           output_encoding=None):
    '''
    .. versionadded:: 2018.3.3,2019.2.0

    Ensure that a repository has been cloned to the specified target directory.
    If not, clone that repository. No fetches will be performed once cloned.

    name
        Address of the remote repository

    target
        Name of the target directory where repository should be cloned

    branch
        Remote branch to check out. If unspecified, the default branch (i.e.
        the one to the remote HEAD points) will be checked out.

        .. note::
            The local branch name will match the remote branch name. If the
            branch name is changed, then that branch will be checked out
            locally, but keep in mind that remote repository will not be
            fetched. If your use case requires that you keep the clone up to
            date with the remote repository, then consider using
            :py:func:`git.latest <salt.states.git.latest>`.

    user
        User under which to run git commands. By default, commands are run by
        the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

    identity
        Path to a private key to use for ssh URLs. Works the same way as in
        :py:func:`git.latest <salt.states.git.latest>`, see that state's
        documentation for more information.

    https_user
        HTTP Basic Auth username for HTTPS (only) clones

    https_pass
        HTTP Basic Auth password for HTTPS (only) clones

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Argument validation: target is mandatory and must be an absolute path
    if target is None:
        ret['comment'] = '\'target\' argument is required'
        return ret
    elif not isinstance(target, six.string_types):
        target = six.text_type(target)

    if not os.path.isabs(target):
        ret['comment'] = '\'target\' path must be absolute'
        return ret

    if branch is not None:
        if not isinstance(branch, six.string_types):
            branch = six.text_type(branch)
        if not branch:
            # Empty string (after coercion) is not a usable branch name
            ret['comment'] = 'Invalid \'branch\' argument'
            return ret

    if not os.path.exists(target):
        need_clone = True
    else:
        # Target exists; confirm it is a usable git checkout by running
        # ``git status`` in it. Any failure is reported as the state comment.
        try:
            __salt__['git.status'](target,
                                   user=user,
                                   password=password,
                                   output_encoding=output_encoding)
        except Exception as exc:
            ret['comment'] = six.text_type(exc)
            return ret
        else:
            need_clone = False

    comments = []

    # Small closures to record changes consistently in both the test-mode
    # and real-run paths.
    def _clone_changes(ret):
        # Record that a clone (would) happen
        ret['changes']['new'] = name + ' => ' + target

    def _branch_changes(ret, old, new):
        # Record a branch switch
        ret['changes']['branch'] = {'old': old, 'new': new}

    if need_clone:
        if __opts__['test']:
            _clone_changes(ret)
            comment = '{0} would be cloned to {1}{2}'.format(
                name,
                target,
                ' with branch \'{0}\''.format(branch)
                    if branch is not None
                    else ''
            )
            return _neutral_test(ret, comment)
        clone_opts = ['--branch', branch] if branch is not None else None
        try:
            __salt__['git.clone'](target,
                                  name,
                                  opts=clone_opts,
                                  user=user,
                                  password=password,
                                  identity=identity,
                                  https_user=https_user,
                                  https_pass=https_pass,
                                  output_encoding=output_encoding)
        except CommandExecutionError as exc:
            msg = 'Clone failed: {0}'.format(_strip_exc(exc))
            return _fail(ret, msg, comments)

        comments.append(
            '{0} cloned to {1}{2}'.format(
                name,
                target,
                ' with branch \'{0}\''.format(branch)
                    if branch is not None
                    else ''
            )
        )
        _clone_changes(ret)
        ret['comment'] = _format_comments(comments)
        ret['result'] = True
        return ret
    else:
        if branch is None:
            # No specific branch requested; existing clone is sufficient
            return _already_cloned(ret, target, branch, comments)
        else:
            current_branch = __salt__['git.current_branch'](
                target,
                user=user,
                password=password,
                output_encoding=output_encoding)
            if current_branch == branch:
                return _already_cloned(ret, target, branch, comments)
            else:
                if __opts__['test']:
                    _branch_changes(ret, current_branch, branch)
                    return _neutral_test(
                        ret,
                        'Branch would be changed to \'{0}\''.format(branch))

                try:
                    # Does a local head for this branch already exist?
                    __salt__['git.rev_parse'](
                        target,
                        rev=branch,
                        user=user,
                        password=password,
                        ignore_retcode=True,
                        output_encoding=output_encoding)
                except CommandExecutionError:
                    # Local head does not exist, so we need to check out a new
                    # branch at the remote rev
                    checkout_rev = '/'.join(('origin', branch))
                    checkout_opts = ['-b', branch]
                else:
                    # Local head exists, so we just need to check it out
                    checkout_rev = branch
                    checkout_opts = None

                try:
                    __salt__['git.checkout'](
                        target,
                        rev=checkout_rev,
                        opts=checkout_opts,
                        user=user,
                        password=password,
                        output_encoding=output_encoding)
                except CommandExecutionError as exc:
                    msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc)
                    return _fail(ret, msg, comments)
                else:
                    comments.append('Branch changed to \'{0}\''.format(branch))
                    _branch_changes(ret, current_branch, branch)
                    ret['comment'] = _format_comments(comments)
                    ret['result'] = True
                    return ret
password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' 
to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} return True
saltstack/salt
salt/states/git.py
config_set
python
def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. 
code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return 
_neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret
.. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L3220-L3438
[ "def clean_kwargs(**kwargs):\n '''\n Return a dict without any of the __pub* keys (or any other keys starting\n with a dunder) from the kwargs dict passed into the execution module\n functions. These keys are useful for tracking what was used to invoke\n the function call, but they may not be desirable to have if passing the\n kwargs forward wholesale.\n\n Usage example:\n\n .. code-block:: python\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n '''\n ret = {}\n for key, val in six.iteritems(kwargs):\n if not key.startswith('__'):\n ret[key] = val\n return ret\n", "def invalid_kwargs(invalid_kwargs, raise_exc=True):\n '''\n Raise a SaltInvocationError if invalid_kwargs is non-empty\n '''\n if invalid_kwargs:\n if isinstance(invalid_kwargs, dict):\n new_invalid = [\n '{0}={1}'.format(x, y)\n for x, y in six.iteritems(invalid_kwargs)\n ]\n invalid_kwargs = new_invalid\n msg = (\n 'The following keyword arguments are not valid: {0}'\n .format(', '.join(invalid_kwargs))\n )\n if raise_exc:\n raise SaltInvocationError(msg)\n else:\n return msg\n", "def _strip_exc(exc):\n '''\n Strip the actual command that was run from exc.strerror to leave just the\n error message\n '''\n return re.sub(r'^Command [\\'\"].+[\\'\"] failed: ', '', exc.strerror)\n", "def _neutral_test(ret, comment):\n ret['result'] = None\n ret['comment'] = comment\n return ret\n", "def _fail(ret, msg, comments=None):\n ret['result'] = False\n if comments:\n msg += '\\n\\nChanges already made: ' + _format_comments(comments)\n ret['comment'] = msg\n return ret\n" ]
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) if submodules and remote_rev: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) msg = _format_comments(comments) log.info(msg) ret['comment'] = msg if new_rev is not None: ret['changes']['revision'] = {'old': None, 'new': new_rev} return ret def present(name, force=False, bare=True, template=None, separate_git_dir=None, shared=None, user=None, password=None, output_encoding=None): ''' Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. 
versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. 
_`worktree`: http://git-scm.com/docs/git-worktree ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # If the named directory is a git repo return True if os.path.isdir(name): if bare and os.path.isfile(os.path.join(name, 'HEAD')): return ret elif not bare and \ (os.path.isdir(os.path.join(name, '.git')) or __salt__['git.is_worktree'](name, user=user, password=password, output_encoding=output_encoding)): return ret # Directory exists and is not a git repo, if force is set destroy the # directory and recreate, otherwise throw an error elif force: # Directory exists, and the ``force`` option is enabled, so we need # to clear out its contents to proceed. if __opts__['test']: ret['changes']['new'] = name ret['changes']['forced init'] = True return _neutral_test( ret, 'Target directory {0} exists. Since force=True, the ' 'contents of {0} would be deleted, and a {1}repository ' 'would be initialized in its place.' .format(name, 'bare ' if bare else '') ) log.debug( 'Removing contents of %s to initialize %srepository in its ' 'place (force=True set in git.present state)', name, 'bare ' if bare else '' ) try: if os.path.islink(name): os.unlink(name) else: salt.utils.files.rm_rf(name) except OSError as exc: return _fail( ret, 'Unable to remove {0}: {1}'.format(name, exc) ) else: ret['changes']['forced init'] = True elif os.listdir(name): return _fail( ret, 'Target \'{0}\' exists, is non-empty, and is not a git ' 'repository. 
Set the \'force\' option to True to remove ' 'this directory\'s contents and proceed with initializing a ' 'repository'.format(name) ) # Run test is set if __opts__['test']: ret['changes']['new'] = name return _neutral_test( ret, 'New {0}repository would be created'.format( 'bare ' if bare else '' ) ) __salt__['git.init'](cwd=name, bare=bare, template=template, separate_git_dir=separate_git_dir, shared=shared, user=user, password=password, output_encoding=output_encoding) actions = [ 'Initialized {0}repository in {1}'.format( 'bare ' if bare else '', name ) ] if template: actions.append('Template directory set to {0}'.format(template)) if separate_git_dir: actions.append('Gitdir set to {0}'.format(separate_git_dir)) message = '. '.join(actions) if len(actions) > 1: message += '.' log.info(message) ret['changes']['new'] = name ret['comment'] = message return ret def detached(name, rev, target=None, remote='origin', user=None, password=None, force_clone=False, force_checkout=False, fetch_remote=True, hard_reset=False, submodules=False, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2016.3.0 Make sure a repository is cloned to the given target directory and is a detached HEAD checkout of the commit ID resolved from ``rev``. name Address of the remote repository. rev The branch, tag, or commit ID to checkout after clone. If a branch or tag is specified it will be resolved to a commit ID and checked out. target Name of the target directory where repository is about to be cloned. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. 
This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_checkout : False When checking out the revision ID, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. fetch_remote : True If ``False`` a fetch will not be performed and only local refs will be reachable. hard_reset : False If ``True`` a hard reset will be performed before the checkout and any uncommitted modifications to the working directory will be discarded. Untracked files will remain in place. .. note:: Changes resulting from a hard reset will not trigger requisites. submodules : False Update submodules identity A path on the minion (or a SaltStack fileserver URL, e.g. ``salt://path/to/identity_file``) to a private key to use for SSH authentication. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. 
versionadded:: 2018.3.1 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if not target: return _fail( ret, '\'{0}\' is not a valid value for the \'target\' argument'.format(rev) ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'Target \'{0}\' is not an absolute path'.format(target) ) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'Identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path) except IOError as exc: log.error('Failed to cache %s: %s', ident_path, exc) return _fail( ret, 'Identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'Identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = 
salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url) # Check if onlyif or unless conditions match run_check_cmd_kwargs = {'runas': user} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret # Determine if supplied ref is a hash remote_rev_type = 'ref' if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): rev = rev.lower() remote_rev_type = 'hash' comments = [] hash_exists_locally = False local_commit_id = None gitdir = os.path.join(target, '.git') if os.path.isdir(gitdir) \ or __salt__['git.is_worktree'](target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree local_commit_id = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding)[0] if remote_rev_type is 'hash': try: __salt__['git.describe'](target, rev, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: hash_exists_locally = False else: # The rev is a hash and it exists locally so skip to checkout hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) if remote in remotes and name in remotes[remote]['fetch']: pass else: # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
current_fetch_url = None if remote in remotes: current_fetch_url = remotes[remote]['fetch'] if __opts__['test']: return _neutral_test( ret, 'Remote {0} would be set to {1}'.format( remote, name ) ) __salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) comments.append( 'Remote {0} updated from \'{1}\' to \'{2}\''.format( remote, current_fetch_url, name ) ) else: # Clone repository if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. if __opts__['test']: return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.detached state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True elif target_contents: # Clone is required, but target dir exists and is non-empty. We # can't proceed. return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--no-checkout'] if remote != 'origin': clone_opts.extend(['--origin', remote]) __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) comments.append('{0} cloned to {1}'.format(name, target)) except Exception as exc: log.error( 'Unexpected exception in git.detached state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) # Repository exists and is ready for fetch/checkout refspecs = [ 'refs/heads/*:refs/remotes/{0}/*'.format(remote), '+refs/tags/*:refs/tags/*' ] if hash_exists_locally or fetch_remote is False: pass else: # Fetch refs from remote if __opts__['test']: return _neutral_test( ret, 'Repository remote {0} would be fetched'.format(remote) ) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=True, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Fetch failed' msg += ':\n\n' + six.text_type(exc) return _fail(ret, msg, comments) else: if fetch_changes: comments.append( 'Remote {0} was fetched, resulting in updated ' 'refs'.format(remote) ) # get refs and checkout checkout_commit_id = '' if remote_rev_type is 'hash': if __salt__['git.describe']( target, rev, user=user, password=password, output_encoding=output_encoding): checkout_commit_id = rev else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) else: try: all_remote_refs = 
__salt__['git.remote_refs']( target, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, output_encoding=output_encoding) if 'refs/remotes/'+remote+'/'+rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev] elif 'refs/tags/' + rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/tags/' + rev] else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) except CommandExecutionError as exc: return _fail( ret, 'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc)) ) if hard_reset: if __opts__['test']: return _neutral_test( ret, 'Hard reset to HEAD would be performed on {0}'.format(target) ) __salt__['git.reset']( target, opts=['--hard', 'HEAD'], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to HEAD before checking out revision' ) # TODO: implement clean function for git module and add clean flag if checkout_commit_id == local_commit_id: new_rev = None else: if __opts__['test']: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id} return _neutral_test( ret, 'Commit ID {0} would be checked out at {1}'.format( checkout_commit_id, target ) ) __salt__['git.checkout'](target, checkout_commit_id, force=force_checkout, user=user, password=password, output_encoding=output_encoding) comments.append( 'Commit ID {0} was checked out at {1}'.format( checkout_commit_id, target ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None if submodules: __salt__['git.submodule'](target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) comments.append( 'Submodules were updated' ) if new_rev is not None: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': 
new_rev} else: comments.append("Already checked out at correct revision") msg = _format_comments(comments) log.info(msg) ret['comment'] = msg return ret def cloned(name, target=None, branch=None, user=None, password=None, identity=None, https_user=None, https_pass=None, output_encoding=None): ''' .. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. If your use case requires that you keep the clone up to date with the remote repository, then consider using :py:func:`git.latest <salt.states.git.latest>`. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. identity Path to a private key to use for ssh URLs. Works the same way as in :py:func:`git.latest <salt.states.git.latest>`, see that state's documentation for more information. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. 
''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if target is None: ret['comment'] = '\'target\' argument is required' return ret elif not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): ret['comment'] = '\'target\' path must be absolute' return ret if branch is not None: if not isinstance(branch, six.string_types): branch = six.text_type(branch) if not branch: ret['comment'] = 'Invalid \'branch\' argument' return ret if not os.path.exists(target): need_clone = True else: try: __salt__['git.status'](target, user=user, password=password, output_encoding=output_encoding) except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: need_clone = False comments = [] def _clone_changes(ret): ret['changes']['new'] = name + ' => ' + target def _branch_changes(ret, old, new): ret['changes']['branch'] = {'old': old, 'new': new} if need_clone: if __opts__['test']: _clone_changes(ret) comment = '{0} would be cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) return _neutral_test(ret, comment) clone_opts = ['--branch', branch] if branch is not None else None try: __salt__['git.clone'](target, name, opts=clone_opts, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) comments.append( '{0} cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) ) _clone_changes(ret) ret['comment'] = _format_comments(comments) ret['result'] = True return ret else: if branch is None: return _already_cloned(ret, target, branch, comments) else: current_branch = __salt__['git.current_branch']( target, user=user, password=password, output_encoding=output_encoding) if current_branch == branch: return 
_already_cloned(ret, target, branch, comments) else: if __opts__['test']: _branch_changes(ret, current_branch, branch) return _neutral_test( ret, 'Branch would be changed to \'{0}\''.format(branch)) try: __salt__['git.rev_parse']( target, rev=branch, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local head does not exist, so we need to check out a new # branch at the remote rev checkout_rev = '/'.join(('origin', branch)) checkout_opts = ['-b', branch] else: # Local head exists, so we just need to check it out checkout_rev = branch checkout_opts = None try: __salt__['git.checkout']( target, rev=checkout_rev, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc) return _fail(ret, msg, comments) else: comments.append('Branch changed to \'{0}\''.format(branch)) _branch_changes(ret, current_branch, branch) ret['comment'] = _format_comments(comments) ret['result'] = True return ret def config_unset(name, value_regex=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): r''' .. versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. 
all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. 
pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. 
Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} return True
saltstack/salt
salt/states/git.py
mod_run_check
python
def mod_run_check(cmd_kwargs, onlyif, unless): ''' Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True`` ''' cmd_kwargs = copy.deepcopy(cmd_kwargs) cmd_kwargs.update({ 'use_vt': False, 'bg': False, 'ignore_retcode': True, 'python_shell': True, }) if onlyif is not None: if not isinstance(onlyif, list): onlyif = [onlyif] for command in onlyif: if not isinstance(command, six.string_types) and command: # Boolean or some other non-string which resolves to True continue try: if __salt__['cmd.retcode'](command, **cmd_kwargs) == 0: # Command exited with a zero retcode continue except Exception as exc: log.exception( 'The following onlyif command raised an error: %s', command ) return { 'comment': 'onlyif raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } return {'comment': 'onlyif condition is false', 'skip_watch': True, 'result': True} if unless is not None: if not isinstance(unless, list): unless = [unless] for command in unless: if not isinstance(command, six.string_types) and not command: # Boolean or some other non-string which resolves to False break try: if __salt__['cmd.retcode'](command, **cmd_kwargs) != 0: # Command exited with a non-zero retcode break except Exception as exc: log.exception( 'The following unless command raised an error: %s', command ) return { 'comment': 'unless raised error ({0}), see log for ' 'more details'.format(exc), 'result': False } else: return {'comment': 'unless condition is true', 'skip_watch': True, 'result': True} return True
Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) Otherwise, returns ``True``
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L3441-L3512
null
# -*- coding: utf-8 -*- ''' States to manage git repositories and git configuration .. important:: Before using git over ssh, make sure your remote host fingerprint exists in your ``~/.ssh/known_hosts`` file. .. versionchanged:: 2015.8.8 This state module now requires git 1.6.5 (released 10 October 2009) or newer. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import copy import errno import logging import os import re import string # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.url import salt.utils.versions from salt.exceptions import CommandExecutionError from salt.utils.versions import LooseVersion as _LooseVersion # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if git is available ''' if 'git.version' not in __salt__: return False git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) return git_ver >= _LooseVersion('1.6.5') def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2 def _short_sha(sha1): return sha1[:7] if sha1 is not None else None def _format_comments(comments): ''' Return a joined list ''' ret = '. '.join(comments) if len(comments) > 1: ret += '.' 
return ret def _need_branch_change(branch, local_branch): ''' Short hand for telling when a new branch is needed ''' return branch is not None and branch != local_branch def _get_branch_opts(branch, local_branch, all_local_branches, desired_upstream, git_ver=None): ''' DRY helper to build list of opts for git.branch, for the purposes of setting upstream tracking branch ''' if branch is not None and branch not in all_local_branches: # We won't be setting upstream because the act of checking out a new # branch will set upstream for us return None if git_ver is None: git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) ret = [] if git_ver >= _LooseVersion('1.8.0'): ret.extend(['--set-upstream-to', desired_upstream]) else: ret.append('--set-upstream') # --set-upstream does not assume the current branch, so we have to # tell it which branch we'll be using ret.append(local_branch if branch is None else branch) ret.append(desired_upstream) return ret def _get_local_rev_and_branch(target, user, password, output_encoding=None): ''' Return the local revision for before/after comparisons ''' log.info('Checking local revision for %s', target) try: local_rev = __salt__['git.revision']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local revision for %s', target) local_rev = None log.info('Checking local branch for %s', target) try: local_branch = __salt__['git.current_branch']( target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: log.info('No local branch for %s', target) local_branch = None return local_rev, local_branch def _strip_exc(exc): ''' Strip the actual command that was run from exc.strerror to leave just the error message ''' return re.sub(r'^Command [\'"].+[\'"] failed: ', '', exc.strerror) def _uptodate(ret, target, comments=None, local_changes=False): ret['comment'] = 'Repository {0} is 
up-to-date'.format(target) if local_changes: ret['comment'] += ( ', but with uncommitted changes. Set \'force_reset\' to True to ' 'purge uncommitted changes.' ) if comments: # Shouldn't be making any changes if the repo was up to date, but # report on them so we are alerted to potential problems with our # logic. ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _neutral_test(ret, comment): ret['result'] = None ret['comment'] = comment return ret def _fail(ret, msg, comments=None): ret['result'] = False if comments: msg += '\n\nChanges already made: ' + _format_comments(comments) ret['comment'] = msg return ret def _already_cloned(ret, target, branch=None, comments=None): ret['result'] = True ret['comment'] = 'Repository already exists at {0}{1}'.format( target, ' and is checked out to branch \'{0}\''.format(branch) if branch else '' ) if comments: ret['comment'] += ( '\n\nChanges {0}made: {1}'.format( 'that would be ' if __opts__['test'] else '', _format_comments(comments) ) ) return ret def _failed_fetch(ret, exc, comments=None): msg = ( 'Fetch failed. Set \'force_fetch\' to True to force the fetch if the ' 'failure was due to not being able to fast-forward. Output of the fetch ' 'command follows:\n\n{0}'.format(_strip_exc(exc)) ) return _fail(ret, msg, comments) def _failed_submodule_update(ret, exc, comments=None): msg = 'Failed to update submodules: ' + _strip_exc(exc) return _fail(ret, msg, comments) def _not_fast_forward(ret, rev, pre, post, branch, local_branch, default_branch, local_changes, comments): branch_msg = '' if branch is None: if rev != 'HEAD': if local_branch != rev: branch_msg = ( ' The desired rev ({0}) differs from the name of the ' 'local branch ({1}), if the desired rev is a branch name ' 'then a forced update could possibly be avoided by ' 'setting the \'branch\' argument to \'{0}\' instead.' 
.format(rev, local_branch) ) else: if default_branch is not None and local_branch != default_branch: branch_msg = ( ' The default remote branch ({0}) differs from the ' 'local branch ({1}). This could be caused by changing the ' 'default remote branch, or if the local branch was ' 'manually changed. Rather than forcing an update, it ' 'may be advisable to set the \'branch\' argument to ' '\'{0}\' instead. To ensure that this state follows the ' '\'{0}\' branch instead of the remote HEAD, set the ' '\'rev\' argument to \'{0}\'.' .format(default_branch, local_branch) ) pre = _short_sha(pre) post = _short_sha(post) return _fail( ret, 'Repository would be updated {0}{1}, but {2}. Set \'force_reset\' to ' 'True{3} to force this update{4}.{5}'.format( 'from {0} to {1}'.format(pre, post) if local_changes and pre != post else 'to {0}'.format(post), ' (after checking out local branch \'{0}\')'.format(branch) if _need_branch_change(branch, local_branch) else '', 'this is not a fast-forward merge' if not local_changes else 'there are uncommitted changes', ' (or \'remote-changes\')' if local_changes else '', ' and discard these changes' if local_changes else '', branch_msg, ), comments ) def latest(name, rev='HEAD', target=None, branch=None, user=None, password=None, update_head=True, force_checkout=False, force_clone=False, force_fetch=False, force_reset=False, submodules=False, bare=False, mirror=False, remote='origin', fetch_tags=True, sync_tags=True, depth=None, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, refspec_branch='*', refspec_tag='*', output_encoding=None, **kwargs): ''' Make sure the repository is cloned to the given directory and is up-to-date. name Address of the remote repository, as passed to ``git clone`` .. note:: From the `Git documentation`_, there are two URL formats supported for SSH authentication. The below two examples are equivalent: .. 
code-block:: text # ssh:// URL ssh://user@server/project.git # SCP-like syntax user@server:project.git A common mistake is to use an ``ssh://`` URL, but with a colon after the domain instead of a slash. This is invalid syntax in Git, and will therefore not work in Salt. When in doubt, confirm that a ``git clone`` works for the URL before using it in Salt. It has been reported by some users that SCP-like syntax is incompatible with git repos hosted on `Atlassian Stash/BitBucket Server`_. In these cases, it may be necessary to use ``ssh://`` URLs for SSH authentication. .. _`Git documentation`: https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#The-SSH-Protocol .. _`Atlassian Stash/BitBucket Server`: https://www.atlassian.com/software/bitbucket/server rev : HEAD The remote branch, tag, or revision ID to checkout after clone / before update. If specified, then Salt will also ensure that the tracking branch is set to ``<remote>/<rev>``, unless ``rev`` refers to a tag or SHA1, in which case Salt will ensure that the tracking branch is unset. If ``rev`` is not specified, it will be assumed to be ``HEAD``, and Salt will not manage the tracking branch at all. .. versionchanged:: 2015.8.0 If not specified, ``rev`` now defaults to the remote repository's HEAD. target Name of the target directory where repository is about to be cloned branch Name of the local branch into which to checkout the specified rev. If not specified, then Salt will not care what branch is being used locally and will just use whatever branch is currently there. .. versionadded:: 2015.8.0 .. note:: If this argument is not specified, this means that Salt will not change the local branch if the repository is reset to another branch/tag/SHA1. For example, assume that the following state was run initially: .. 
code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www This would have cloned the HEAD of that repo (since a ``rev`` wasn't specified), and because ``branch`` is not specified, the branch in the local clone at ``/var/www/foo`` would be whatever the default branch is on the remote repository (usually ``master``, but not always). Now, assume that it becomes necessary to switch this checkout to the ``dev`` branch. This would require ``rev`` to be set, and probably would also require ``force_reset`` to be enabled: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - force_reset: True The result of this state would be to perform a hard-reset to ``origin/dev``. Since ``branch`` was not specified though, while ``/var/www/foo`` would reflect the contents of the remote repo's ``dev`` branch, the local branch would still remain whatever it was when it was cloned. To make the local branch match the remote one, set ``branch`` as well, like so: .. code-block:: yaml foo_app: git.latest: - name: https://mydomain.tld/apps/foo.git - target: /var/www/foo - user: www - rev: dev - branch: dev - force_reset: True This may seem redundant, but Salt tries to support a wide variety of use cases, and doing it this way allows for the use case where the local branch doesn't need to be strictly managed. user Local system user under which to run git commands. By default, commands are run by the user under which the minion is running. .. note:: This is not to be confused with the username for http(s)/SSH authentication. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. 
versionadded:: 2016.3.4 update_head : True If set to ``False``, then the remote repository will be fetched (if necessary) to ensure that the commit to which ``rev`` points exists in the local checkout, but no changes will be made to the local HEAD. .. versionadded:: 2015.8.3 force_checkout : False When checking out the local branch, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_fetch : False If a fetch needs to be performed, non-fast-forward fetches will cause this state to fail. Set this argument to ``True`` to force the fetch even if it is a non-fast-forward update. .. versionadded:: 2015.8.0 force_reset : False If the update is not a fast-forward, this state will fail. Set this argument to ``True`` to force a hard-reset to the remote revision in these cases. .. versionchanged:: 2019.2.0 This option can now be set to ``remote-changes``, which will instruct Salt not to discard local changes if the repo is up-to-date with the remote repository. submodules : False Update submodules on clone or branch change bare : False Set to ``True`` if the repository is to be a bare clone of the remote repository. .. note: Setting this option to ``True`` is incompatible with the ``rev`` argument. mirror Set to ``True`` if the repository is to be a mirror of the remote repository. This implies that ``bare`` set to ``True``, and thus is incompatible with ``rev``. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. 
fetch_tags : True If ``True``, then when a fetch is performed all tags will be fetched, even those which are not reachable by any branch on the remote. sync_tags : True If ``True``, then Salt will delete tags which exist in the local clone but are not found on the remote repository. .. versionadded:: 2018.3.4 depth Defines depth in history when git a clone is needed in order to ensure latest. E.g. ``depth: 1`` is useful when deploying from a repository with a long history. Use rev to specify branch or tag. This is not compatible with revision IDs. .. versionchanged:: 2019.2.0 This option now supports tags as well as branches, on Git 1.8.0 and newer. identity Path to a private key to use for ssh URLs. This can be either a single string, or a list of strings. For example: .. code-block:: yaml # Single key git@github.com:user/repo.git: git.latest: - user: deployer - identity: /home/deployer/.ssh/id_rsa # Two keys git@github.com:user/repo.git: git.latest: - user: deployer - identity: - /home/deployer/.ssh/id_rsa - /home/deployer/.ssh/id_rsa_alternate If multiple keys are specified, they will be tried one-by-one in order for each git command which needs to authenticate. .. warning:: Unless Salt is invoked from the minion using ``salt-call``, the key(s) must be passphraseless. For greater security with passphraseless private keys, see the `sshd(8)`_ manpage for information on securing the keypair from the remote side in the ``authorized_keys`` file. .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE%20FORMAT .. versionchanged:: 2015.8.7 Salt will no longer attempt to use passphrase-protected keys unless invoked from the minion using ``salt-call``, to prevent blocking waiting for user input. .. versionchanged:: 2016.3.0 Key can now be specified as a SaltStack fileserver URL (e.g. ``salt://path/to/identity_file``). https_user HTTP Basic Auth username for HTTPS (only) clones .. 
versionadded:: 2015.5.0 https_pass HTTP Basic Auth password for HTTPS (only) clones .. versionadded:: 2015.5.0 onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false refspec_branch : * A glob expression defining which branches to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 refspec_tag : * A glob expression defining which tags to retrieve when fetching. See `git-fetch(1)`_ for more information on how refspecs work. .. versionadded:: 2017.7.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-fetch(1)`: http://git-scm.com/docs/git-fetch .. note:: Clashing ID declarations can be avoided when including different branches from the same git repository in the same SLS file by using the ``name`` argument. The example below checks out the ``gh-pages`` and ``gh-pages-prod`` branches from the same repository into separate directories. The example also sets up the ``ssh_known_hosts`` ssh key required to perform the git checkout. Also, it has been reported that the SCP-like syntax for .. 
code-block:: yaml gitlab.example.com: ssh_known_hosts: - present - user: root - enc: ecdsa - fingerprint: 4e:94:b0:54:c1:5b:29:a2:70:0e:e1:a3:51:ee:ee:e3 git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-staging: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages - target: /usr/share/nginx/staging - identity: salt://website/id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com git-website-prod: git.latest: - name: git@gitlab.example.com:user/website.git - rev: gh-pages-prod - target: /usr/share/nginx/prod - identity: /root/.ssh/website_id_rsa - require: - pkg: git - ssh_known_hosts: gitlab.example.com ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not remote: return _fail(ret, '\'remote\' argument is required') if not target: return _fail(ret, '\'target\' argument is required') if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if force_reset not in (True, False, 'remote-changes'): return _fail( ret, '\'force_reset\' must be one of True, False, or \'remote-changes\'' ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'target \'{0}\' is not an absolute path'.format(target) ) if branch is not None and not isinstance(branch, six.string_types): branch = six.text_type(branch) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if password is not None and not 
isinstance(password, six.string_types): password = six.text_type(password) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path, __env__) except IOError as exc: log.exception('Failed to cache %s', ident_path) return _fail( ret, 'identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) # Check for lfs filter settings, and setup lfs_opts accordingly. These opts # will be passed where appropriate to ensure that these commands are # authenticated and that the git LFS plugin can download files. 
use_lfs = bool( __salt__['git.config_get_regexp']( r'filter\.lfs\.', **{'global': True})) lfs_opts = {'identity': identity} if use_lfs else {} if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = \ salt.utils.url.redact_http_basic_auth(desired_fetch_url) if mirror: bare = True # Check to make sure rev and mirror/bare are not both in use if rev != 'HEAD' and bare: return _fail(ret, ('\'rev\' is not compatible with the \'mirror\' and ' '\'bare\' arguments')) run_check_cmd_kwargs = {'runas': user, 'password': password} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] # check if git.latest should be applied cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret refspecs = [ 'refs/heads/{0}:refs/remotes/{1}/{0}'.format(refspec_branch, remote), '+refs/tags/{0}:refs/tags/{0}'.format(refspec_tag) ] if fetch_tags else [] log.info('Checking remote revision for %s', name) try: all_remote_refs = __salt__['git.remote_refs']( name, heads=False, tags=False, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Failed to check remote refs: {0}'.format(_strip_exc(exc)) ) except NameError as exc: if 'global name' in exc.message: raise CommandExecutionError( 'Failed to check remote refs: You may need to install ' 'GitPython or PyGit2') raise if 'HEAD' in all_remote_refs: head_rev = all_remote_refs['HEAD'] for refname, refsha in six.iteritems(all_remote_refs): if refname.startswith('refs/heads/'): if refsha == head_rev: default_branch = refname.partition('refs/heads/')[-1] 
break else: default_branch = None else: head_rev = None default_branch = None desired_upstream = False if bare: remote_rev = None remote_rev_type = None else: if rev == 'HEAD': if head_rev is not None: remote_rev = head_rev # Just go with whatever the upstream currently is desired_upstream = None remote_rev_type = 'sha1' else: # Empty remote repo remote_rev = None remote_rev_type = None elif 'refs/heads/' + rev in all_remote_refs: remote_rev = all_remote_refs['refs/heads/' + rev] desired_upstream = '/'.join((remote, rev)) remote_rev_type = 'branch' elif 'refs/tags/' + rev + '^{}' in all_remote_refs: # Annotated tag remote_rev = all_remote_refs['refs/tags/' + rev + '^{}'] remote_rev_type = 'tag' elif 'refs/tags/' + rev in all_remote_refs: # Non-annotated tag remote_rev = all_remote_refs['refs/tags/' + rev] remote_rev_type = 'tag' else: if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): # git ls-remote did not find the rev, and because it's a # hex string <= 40 chars we're going to assume that the # desired rev is a SHA1 rev = rev.lower() remote_rev = rev remote_rev_type = 'sha1' else: remote_rev = None remote_rev_type = None # For the comment field of the state return dict, the remote location # (and short-sha1, if rev is not a sha1) is referenced several times, # determine it once here and reuse the value below. if remote_rev_type == 'sha1': if rev == 'HEAD': remote_loc = 'remote HEAD (' + remote_rev[:7] + ')' else: remote_loc = remote_rev[:7] elif remote_rev is not None: remote_loc = '{0} ({1})'.format( desired_upstream if remote_rev_type == 'branch' else rev, remote_rev[:7] ) else: # Shouldn't happen but log a warning here for future # troubleshooting purposes in the event we find a corner case. log.warning( 'Unable to determine remote_loc. 
rev is %s, remote_rev is ' '%s, remove_rev_type is %s, desired_upstream is %s, and bare ' 'is%s set', rev, remote_rev, remote_rev_type, desired_upstream, ' not' if not bare else '' ) remote_loc = None if depth is not None and remote_rev_type not in ('branch', 'tag'): return _fail( ret, 'When \'depth\' is used, \'rev\' must be set to the name of a ' 'branch or tag on the remote repository' ) if remote_rev is None and not bare: if rev != 'HEAD': # A specific rev is desired, but that rev doesn't exist on the # remote repo. return _fail( ret, 'No revision matching \'{0}\' exists in the remote ' 'repository'.format(rev) ) git_ver = _LooseVersion(__salt__['git.version'](versioninfo=False)) check = 'refs' if bare else '.git' gitdir = os.path.join(target, check) comments = [] if os.path.isdir(gitdir) \ or __salt__['git.is_worktree']( target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree try: all_local_branches = __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding) all_local_tags = set( __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding) if not bare and remote_rev is None and local_rev is not None: return _fail( ret, 'Remote repository is empty, cannot update from a ' 'non-empty to an empty repository' ) # Base rev and branch are the ones from which any reset or merge # will take place. If the branch is not being specified, the base # will be the "local" rev and branch, i.e. those we began with # before this state was run. If a branch is being specified and it # both exists and is not the one with which we started, then we'll # be checking that branch out first, and it instead becomes our # base. The base branch and rev will be used below in comparisons # to determine what changes to make. 
base_rev = local_rev base_branch = local_branch if _need_branch_change(branch, local_branch): if branch not in all_local_branches: # We're checking out a new branch, so the base_rev and # remote_rev will be identical. base_rev = remote_rev else: base_branch = branch # Desired branch exists locally and is not the current # branch. We'll be performing a checkout to that branch # eventually, but before we do that we need to find the # current SHA1. try: base_rev = __salt__['git.rev_parse']( target, branch + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return _fail( ret, 'Unable to get position of local branch \'{0}\': ' '{1}'.format(branch, _strip_exc(exc)), comments ) remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) revs_match = _revs_equal(local_rev, remote_rev, remote_rev_type) try: # If not a bare repo, check `git diff HEAD` to determine if # there are local changes. local_changes = bool( not bare and __salt__['git.diff'](target, 'HEAD', user=user, password=password, output_encoding=output_encoding) ) except CommandExecutionError: # No need to capture the error and log it, the _git_run() # helper in the git execution module will have already logged # the output from the command. log.warning( 'git.latest: Unable to determine if %s has local changes', target ) local_changes = False if local_changes and revs_match: if force_reset is True: msg = ( '{0} is up-to-date, but with uncommitted changes. ' 'Since \'force_reset\' is set to True, these local ' 'changes would be reset. 
To only reset when there are ' 'changes in the remote repository, set ' '\'force_reset\' to \'remote-changes\'.'.format(target) ) if __opts__['test']: ret['changes']['forced update'] = True if comments: msg += _format_comments(comments) return _neutral_test(ret, msg) log.debug(msg.replace('would', 'will')) else: log.debug( '%s up-to-date, but with uncommitted changes. Since ' '\'force_reset\' is set to %s, no changes will be ' 'made.', target, force_reset ) return _uptodate(ret, target, _format_comments(comments), local_changes) if remote_rev_type == 'sha1' \ and base_rev is not None \ and base_rev.startswith(remote_rev): # Either we're already checked out to the branch we need and it # is up-to-date, or the branch to which we need to switch is # on the same SHA1 as the desired remote revision. Either way, # we know we have the remote rev present already and no fetch # will be needed. has_remote_rev = True else: has_remote_rev = False if remote_rev is not None: try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local checkout doesn't have the remote_rev pass else: # The object might exist enough to get a rev-parse to # work, while the local ref could have been # deleted/changed/force updated. Do some further sanity # checks to determine if we really do have the # remote_rev. if remote_rev_type == 'branch': if remote in remotes: try: # Do a rev-parse on <remote>/<rev> to get # the local SHA1 for it, so we can compare # it to the remote_rev SHA1. local_copy = __salt__['git.rev_parse']( target, desired_upstream, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: pass else: # If the SHA1s don't match, then the remote # branch was force-updated, and we need to # fetch to update our local copy the ref # for the remote branch. 
If they do match, # then we have the remote_rev and don't # need to fetch. if local_copy == remote_rev: has_remote_rev = True elif remote_rev_type == 'tag': if rev in all_local_tags: try: local_tag_sha1 = __salt__['git.rev_parse']( target, rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Shouldn't happen if the tag exists # locally but account for this just in # case. local_tag_sha1 = None if local_tag_sha1 == remote_rev: has_remote_rev = True else: if not force_reset: # SHA1 of tag on remote repo is # different than local tag. Unless # we're doing a hard reset then we # don't need to proceed as we know that # the fetch will update the tag and the # only way to make the state succeed is # to reset the branch to point at the # tag's new location. return _fail( ret, '\'{0}\' is a tag, but the remote ' 'SHA1 for this tag ({1}) doesn\'t ' 'match the local SHA1 ({2}). Set ' '\'force_reset\' to True to force ' 'this update.'.format( rev, _short_sha(remote_rev), _short_sha(local_tag_sha1) ) ) elif remote_rev_type == 'sha1': has_remote_rev = True # If fast_forward is not boolean, then we don't yet know if this # will be a fast forward or not, because a fetch is required. fast_forward = False \ if (local_changes and force_reset != 'remote-changes') \ else None if has_remote_rev: if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): ret['comment'] = ( '{0} is already present and local HEAD ({1}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format( remote_loc.capitalize() if rev == 'HEAD' else remote_loc, local_rev[:7] ) ) return ret # No need to check if this is a fast_forward if we already know # that it won't be (due to local changes). if fast_forward is not False: if base_rev is None: # If we're here, the remote_rev exists in the local # checkout but there is still no HEAD locally. 
A # possible reason for this is that an empty repository # existed there and a remote was added and fetched, but # the repository was not fast-forwarded. Regardless, # going from no HEAD to a locally-present rev is # considered a fast-forward update. fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) if fast_forward is False: if force_reset is False: return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) merge_action = 'hard-reset' elif fast_forward is True: merge_action = 'fast-forwarded' else: merge_action = 'updated' if base_branch is None: # No local branch, no upstream tracking branch upstream = None else: try: upstream = __salt__['git.rev_parse']( target, base_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # There is a local branch but the rev-parse command # failed, so that means there is no upstream tracking # branch. This could be because it is just not set, or # because the branch was checked out to a SHA1 or tag # instead of a branch. Set upstream to False to make a # distinction between the case above where there is no # local_branch (when the local checkout is an empty # repository). 
upstream = False if remote in remotes: fetch_url = remotes[remote]['fetch'] else: log.debug( 'Remote \'%s\' not found in git checkout at %s', remote, target ) fetch_url = None if remote_rev is not None and desired_fetch_url != fetch_url: if __opts__['test']: actions = [ 'Remote \'{0}\' would be changed from {1} to {2}' .format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ] if not has_remote_rev: actions.append('Remote would be fetched') if not revs_match: if update_head: ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if fast_forward is False: ret['changes']['forced update'] = True actions.append( 'Repository would be {0} to {1}'.format( merge_action, _short_sha(remote_rev) ) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: if not revs_match and not update_head: # Repo content would not be modified but the remote # URL would be modified, so we can't just say that # the repo is up-to-date, we need to inform the # user of the actions taken. ret['comment'] = _format_comments(actions) return ret return _uptodate(ret, target, _format_comments(actions)) # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
__salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) if fetch_url is None: comments.append( 'Remote \'{0}\' set to {1}'.format( remote, redacted_fetch_url ) ) ret['changes']['new'] = name + ' => ' + remote else: comments.append( 'Remote \'{0}\' changed from {1} to {2}'.format( remote, salt.utils.url.redact_http_basic_auth(fetch_url), redacted_fetch_url ) ) if remote_rev is not None: if __opts__['test']: actions = [] if not has_remote_rev: actions.append( 'Remote \'{0}\' would be fetched'.format(remote) ) if (not revs_match) \ and (update_head or (branch is not None and branch != local_branch)): ret['changes']['revision'] = { 'old': local_rev, 'new': remote_rev } if _need_branch_change(branch, local_branch): if branch not in all_local_branches: actions.append( 'New branch \'{0}\' would be checked ' 'out, with {1} as a starting ' 'point'.format(branch, remote_loc) ) if desired_upstream: actions.append( 'Tracking branch would be set to {0}' .format(desired_upstream) ) else: actions.append( 'Branch \'{0}\' would be checked out ' 'and {1} to {2}'.format( branch, merge_action, _short_sha(remote_rev) ) ) else: if not revs_match: if update_head: if fast_forward is True: actions.append( 'Repository would be fast-forwarded from ' '{0} to {1}'.format( _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Repository would be {0} from {1} to {2}' .format( 'hard-reset' if force_reset and has_remote_rev else 'updated', _short_sha(local_rev), _short_sha(remote_rev) ) ) else: actions.append( 'Local HEAD ({0}) does not match {1} but ' 'update_head=False, HEAD would not be ' 'updated locally'.format( local_rev[:7], remote_loc ) ) # Check if upstream needs changing if not upstream and desired_upstream: actions.append( 'Tracking branch would be set to {0}'.format( desired_upstream ) ) elif upstream and desired_upstream is False: actions.append( 
'Tracking branch would be unset' ) elif desired_upstream and upstream != desired_upstream: actions.append( 'Tracking branch would be ' 'updated to {0}'.format(desired_upstream) ) if ret['changes']: return _neutral_test(ret, _format_comments(actions)) else: formatted_actions = _format_comments(actions) if not revs_match \ and not update_head \ and formatted_actions: ret['comment'] = formatted_actions return ret return _uptodate(ret, target, _format_comments(actions)) if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, we # can only do this if the git version is 1.8.0 or newer, as # the --unset-upstream option was not added until that # version. if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, all_local_branches, desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None and local_branch is None: return _fail( ret, 'Cannot set/unset upstream tracking branch, local ' 'HEAD refers to nonexistent branch. This may have ' 'been caused by cloning a remote repository for which ' 'the default branch was renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) remote_tags = set([ x.replace('refs/tags/', '') for x in __salt__['git.ls_remote']( cwd=target, remote=remote, opts="--tags", user=user, password=password, identity=identity, saltenv=__env__, ignore_retcode=True, output_encoding=output_encoding) if '^{}' not in x ]) if all_local_tags != remote_tags: has_remote_rev = False new_tags = remote_tags - all_local_tags deleted_tags = all_local_tags - remote_tags if new_tags: ret['changes']['new_tags'] = new_tags if sync_tags and deleted_tags: # Delete the local copy of the tags to keep up with the # remote repository. for tag_name in deleted_tags: try: if not __opts__['test']: __salt__['git.tag']( target, tag_name, opts='-d', user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: ret.setdefault('warnings', []).append( 'Failed to remove local tag \'{0}\':\n\n' '{1}\n\n'.format(tag_name, exc) ) else: ret['changes'].setdefault( 'deleted_tags', []).append(tag_name) if ret['changes'].get('deleted_tags'): comments.append( 'The following tags {0} removed from the local ' 'checkout: {1}'.format( 'would be' if __opts__['test'] else 'were', ', '.join(ret['changes']['deleted_tags']) ) ) if not has_remote_rev: try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: if fetch_changes: comments.append( '{0} was fetched, resulting in updated ' 'refs'.format(name) ) try: __salt__['git.rev_parse']( target, remote_rev + '^{commit}', user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError as exc: return 
_fail( ret, 'Fetch did not successfully retrieve rev \'{0}\' ' 'from {1}: {2}'.format(rev, name, exc) ) if (not revs_match and not update_head) \ and (branch is None or branch == local_branch): # Rev now exists locally (was fetched), and since we're # not updating HEAD we'll just exit here. ret['comment'] = remote_loc.capitalize() \ if rev == 'HEAD' \ else remote_loc ret['comment'] += ( ' is already present and local HEAD ({0}) does not ' 'match, but update_head=False. HEAD has not been ' 'updated locally.'.format(local_rev[:7]) ) return ret # Now that we've fetched, check again whether or not # the update is a fast-forward. if base_rev is None: fast_forward = True else: fast_forward = __salt__['git.merge_base']( target, refs=[base_rev, remote_rev], is_ancestor=True, user=user, password=password, output_encoding=output_encoding) if fast_forward is force_reset is False \ or (fast_forward is True and local_changes and force_reset is False): return _not_fast_forward( ret, rev, base_rev, remote_rev, branch, local_branch, default_branch, local_changes, comments) if _need_branch_change(branch, local_branch): if local_changes and not force_checkout: return _fail( ret, 'Local branch \'{0}\' has uncommitted ' 'changes. Set \'force_checkout\' to True to ' 'discard them and proceed.'.format(local_branch) ) # TODO: Maybe re-retrieve all_local_branches to handle # the corner case where the destination branch was # added to the local checkout during a fetch that takes # a long time to complete. 
if branch not in all_local_branches: if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev checkout_opts = ['-b', branch] else: checkout_rev = branch checkout_opts = [] __salt__['git.checkout'](target, checkout_rev, force=force_checkout, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) if '-b' in checkout_opts: comments.append( 'New branch \'{0}\' was checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) else: comments.append( '\'{0}\' was checked out'.format(checkout_rev) ) if fast_forward is False: __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) ret['changes']['forced update'] = True if local_changes: comments.append('Uncommitted changes were discarded') comments.append( 'Repository was hard-reset to {0}'.format(remote_loc) ) elif fast_forward is True \ and local_changes \ and force_reset is not False: __salt__['git.discard_local_changes']( target, user=user, password=password, output_encoding=output_encoding) comments.append('Uncommitted changes were discarded') if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) # Fast-forward to the desired revision if fast_forward is True \ and not _revs_equal(base_rev, remote_rev, remote_rev_type): if desired_upstream or rev == 'HEAD': # Check first to see if we are on a branch before # trying to merge changes. (The call to # git.symbolic_ref will only return output if HEAD # points to a branch.) if __salt__['git.symbolic_ref']( target, 'HEAD', opts=['--quiet'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding): if git_ver >= _LooseVersion('1.8.1.6'): # --ff-only added in version 1.8.1.6. 
It's not # 100% necessary, but if we can use it, we'll # ensure that the merge doesn't go through if # not a fast-forward. Granted, the logic that # gets us to this point shouldn't allow us to # attempt this merge if it's not a # fast-forward, but it's an extra layer of # protection. merge_opts = ['--ff-only'] else: merge_opts = [] __salt__['git.merge']( target, rev=remote_rev, opts=merge_opts, user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was fast-forwarded to {0}' .format(remote_loc) ) else: return _fail( ret, 'Unable to fast-forward, HEAD is detached', comments ) else: # Update is a fast forward, but we cannot merge to that # commit so we'll reset to it. __salt__['git.reset']( target, opts=['--hard', remote_rev if rev == 'HEAD' else rev], user=user, password=password, output_encoding=output_encoding, **lfs_opts) comments.append( 'Repository was reset to {0} (fast-forward)' .format(rev) ) # TODO: Figure out how to add submodule update info to # test=True return data, and changes dict. 
if submodules: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) elif bare: if __opts__['test']: msg = ( 'Bare repository at {0} would be fetched' .format(target) ) if ret['changes']: return _neutral_test(ret, msg) else: return _uptodate(ret, target, msg) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=force_fetch, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_fetch(ret, exc, comments) else: comments.append( 'Bare repository at {0} was fetched{1}'.format( target, ', resulting in updated refs' if fetch_changes else '' ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) if not bare and not _revs_equal(new_rev, remote_rev, remote_rev_type): return _fail(ret, 'Failed to update repository', comments) if local_rev != new_rev: log.info( 'Repository %s updated: %s => %s', target, local_rev, new_rev ) ret['comment'] = _format_comments(comments) ret['changes']['revision'] = {'old': local_rev, 'new': new_rev} else: return _uptodate(ret, target, _format_comments(comments)) else: if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. 
if __opts__['test']: ret['changes']['forced clone'] = True ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.latest state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True # Clone is required, but target dir exists and is non-empty. We # can't proceed. elif target_contents: return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: ret['changes']['new'] = name + ' => ' + target return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--mirror'] if mirror else ['--bare'] if bare else [] if remote != 'origin': clone_opts.extend(['--origin', remote]) if depth is not None: clone_opts.extend(['--depth', six.text_type(depth), '--branch', rev]) # We're cloning a fresh repo, there is no local branch or revision local_branch = local_rev = None try: __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) ret['changes']['new'] = name + ' => ' + target comments.append( '{0} cloned to {1}{2}'.format( name, target, ' as mirror' if mirror else ' as bare repository' if bare else '' ) ) if not bare: if not remote_rev: if rev != 'HEAD': # No HEAD means the remote repo is empty, which means # our new clone will also be empty. This state has # failed, since a rev was specified but no matching rev # exists on the remote host. 
msg = ( '%s was cloned but is empty, so {0}/{1} ' 'cannot be checked out'.format(remote, rev) ) log.error(msg, name) # Disable check for string substitution return _fail(ret, msg % 'Repository', comments) # pylint: disable=E1321 else: if remote_rev_type == 'tag' \ and rev not in __salt__['git.list_tags']( target, user=user, password=password, output_encoding=output_encoding): return _fail( ret, 'Revision \'{0}\' does not exist in clone' .format(rev), comments ) if branch is not None: if branch not in \ __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding): if rev == 'HEAD': checkout_rev = remote_rev else: checkout_rev = desired_upstream \ if desired_upstream \ else rev __salt__['git.checkout']( target, checkout_rev, opts=['-b', branch], user=user, password=password, output_encoding=output_encoding) comments.append( 'Branch \'{0}\' checked out, with {1} ' 'as a starting point'.format( branch, remote_loc ) ) local_rev, local_branch = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding) if local_branch is None \ and remote_rev is not None \ and 'HEAD' not in all_remote_refs: return _fail( ret, 'Remote HEAD refers to a ref that does not exist. ' 'This can happen when the default branch on the ' 'remote repository is renamed or deleted. 
If you ' 'are unable to fix the remote repository, you can ' 'work around this by setting the \'branch\' argument ' '(which will ensure that the named branch is created ' 'if it does not already exist).', comments ) if not _revs_equal(local_rev, remote_rev, remote_rev_type): __salt__['git.reset']( target, opts=['--hard', remote_rev], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to {0}'.format(remote_loc) ) try: upstream = __salt__['git.rev_parse']( target, local_branch + '@{upstream}', opts=['--abbrev-ref'], user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: upstream = False if not upstream and desired_upstream: upstream_action = ( 'Tracking branch was set to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) elif upstream and desired_upstream is False: # If the remote_rev is a tag or SHA1, and there is an # upstream tracking branch, we will unset it. However, # we can only do this if the git version is 1.8.0 or # newer, as the --unset-upstream option was not added # until that version. 
if git_ver >= _LooseVersion('1.8.0'): upstream_action = 'Tracking branch was unset' branch_opts = ['--unset-upstream'] else: branch_opts = None elif desired_upstream and upstream != desired_upstream: upstream_action = ( 'Tracking branch was updated to {0}'.format( desired_upstream ) ) branch_opts = _get_branch_opts( branch, local_branch, __salt__['git.list_branches']( target, user=user, password=password, output_encoding=output_encoding), desired_upstream, git_ver) else: branch_opts = None if branch_opts is not None: __salt__['git.branch']( target, opts=branch_opts, user=user, password=password, output_encoding=output_encoding) comments.append(upstream_action) if submodules and remote_rev: try: __salt__['git.submodule']( target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) except CommandExecutionError as exc: return _failed_submodule_update(ret, exc, comments) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None except Exception as exc: log.error( 'Unexpected exception in git.latest state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) msg = _format_comments(comments) log.info(msg) ret['comment'] = msg if new_rev is not None: ret['changes']['revision'] = {'old': None, 'new': new_rev} return ret def present(name, force=False, bare=True, template=None, separate_git_dir=None, shared=None, user=None, password=None, output_encoding=None): ''' Ensure that a repository exists in the given directory .. warning:: If the minion has Git 2.5 or later installed, ``name`` points to a worktree_, and ``force`` is set to ``True``, then the worktree will be deleted. This has been corrected in Salt 2015.8.0. name Path to the directory .. 
versionchanged:: 2015.8.0 This path must now be absolute force : False If ``True``, and if ``name`` points to an existing directory which does not contain a git repository, then the contents of that directory will be recursively removed and a new repository will be initialized in its place. bare : True If ``True``, and a repository must be initialized, then the repository will be a bare repository. .. note:: This differs from the default behavior of :py:func:`git.init <salt.modules.git.init>`, make sure to set this value to ``False`` if a bare repo is not desired. template If a new repository is initialized, this argument will specify an alternate template directory. .. versionadded:: 2015.8.0 separate_git_dir If a new repository is initialized, this argument will specify an alternate ``$GIT_DIR`` .. versionadded:: 2015.8.0 shared Set sharing permissions on git repo. See `git-init(1)`_ for more details. .. versionadded:: 2015.5.0 user User under which to run git commands. By default, commands are run by the user under which the minion is running. .. versionadded:: 0.17.0 password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 .. _`git-init(1)`: http://git-scm.com/docs/git-init .. 
_`worktree`: http://git-scm.com/docs/git-worktree ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # If the named directory is a git repo return True if os.path.isdir(name): if bare and os.path.isfile(os.path.join(name, 'HEAD')): return ret elif not bare and \ (os.path.isdir(os.path.join(name, '.git')) or __salt__['git.is_worktree'](name, user=user, password=password, output_encoding=output_encoding)): return ret # Directory exists and is not a git repo, if force is set destroy the # directory and recreate, otherwise throw an error elif force: # Directory exists, and the ``force`` option is enabled, so we need # to clear out its contents to proceed. if __opts__['test']: ret['changes']['new'] = name ret['changes']['forced init'] = True return _neutral_test( ret, 'Target directory {0} exists. Since force=True, the ' 'contents of {0} would be deleted, and a {1}repository ' 'would be initialized in its place.' .format(name, 'bare ' if bare else '') ) log.debug( 'Removing contents of %s to initialize %srepository in its ' 'place (force=True set in git.present state)', name, 'bare ' if bare else '' ) try: if os.path.islink(name): os.unlink(name) else: salt.utils.files.rm_rf(name) except OSError as exc: return _fail( ret, 'Unable to remove {0}: {1}'.format(name, exc) ) else: ret['changes']['forced init'] = True elif os.listdir(name): return _fail( ret, 'Target \'{0}\' exists, is non-empty, and is not a git ' 'repository. 
Set the \'force\' option to True to remove ' 'this directory\'s contents and proceed with initializing a ' 'repository'.format(name) ) # Run test is set if __opts__['test']: ret['changes']['new'] = name return _neutral_test( ret, 'New {0}repository would be created'.format( 'bare ' if bare else '' ) ) __salt__['git.init'](cwd=name, bare=bare, template=template, separate_git_dir=separate_git_dir, shared=shared, user=user, password=password, output_encoding=output_encoding) actions = [ 'Initialized {0}repository in {1}'.format( 'bare ' if bare else '', name ) ] if template: actions.append('Template directory set to {0}'.format(template)) if separate_git_dir: actions.append('Gitdir set to {0}'.format(separate_git_dir)) message = '. '.join(actions) if len(actions) > 1: message += '.' log.info(message) ret['changes']['new'] = name ret['comment'] = message return ret def detached(name, rev, target=None, remote='origin', user=None, password=None, force_clone=False, force_checkout=False, fetch_remote=True, hard_reset=False, submodules=False, identity=None, https_user=None, https_pass=None, onlyif=None, unless=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2016.3.0 Make sure a repository is cloned to the given target directory and is a detached HEAD checkout of the commit ID resolved from ``rev``. name Address of the remote repository. rev The branch, tag, or commit ID to checkout after clone. If a branch or tag is specified it will be resolved to a commit ID and checked out. target Name of the target directory where repository is about to be cloned. remote : origin Git remote to use. If this state needs to clone the repo, it will clone it using this value as the initial remote name. If the repository already exists, and a remote by this name is not present, one will be added. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. 
This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 force_clone : False If the ``target`` directory exists and is not a git repository, then this state will fail. Set this argument to ``True`` to remove the contents of the target directory and clone the repo into it. force_checkout : False When checking out the revision ID, the state will fail if there are unwritten changes. Set this argument to ``True`` to discard unwritten changes when checking out. fetch_remote : True If ``False`` a fetch will not be performed and only local refs will be reachable. hard_reset : False If ``True`` a hard reset will be performed before the checkout and any uncommitted modifications to the working directory will be discarded. Untracked files will remain in place. .. note:: Changes resulting from a hard reset will not trigger requisites. submodules : False Update submodules identity A path on the minion (or a SaltStack fileserver URL, e.g. ``salt://path/to/identity_file``) to a private key to use for SSH authentication. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones onlyif A command to run as a check, run the named command only if the command passed to the ``onlyif`` option returns true unless A command to run as a check, only run the named command if the command passed to the ``unless`` option returns false output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. 
versionadded:: 2018.3.1 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} kwargs = salt.utils.args.clean_kwargs(**kwargs) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not rev: return _fail( ret, '\'{0}\' is not a valid value for the \'rev\' argument'.format(rev) ) if not target: return _fail( ret, '\'{0}\' is not a valid value for the \'target\' argument'.format(rev) ) # Ensure that certain arguments are strings to ensure that comparisons work if not isinstance(rev, six.string_types): rev = six.text_type(rev) if target is not None: if not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): return _fail( ret, 'Target \'{0}\' is not an absolute path'.format(target) ) if user is not None and not isinstance(user, six.string_types): user = six.text_type(user) if remote is not None and not isinstance(remote, six.string_types): remote = six.text_type(remote) if identity is not None: if isinstance(identity, six.string_types): identity = [identity] elif not isinstance(identity, list): return _fail(ret, 'Identity must be either a list or a string') identity = [os.path.expanduser(x) for x in identity] for ident_path in identity: if 'salt://' in ident_path: try: ident_path = __salt__['cp.cache_file'](ident_path) except IOError as exc: log.error('Failed to cache %s: %s', ident_path, exc) return _fail( ret, 'Identity \'{0}\' does not exist.'.format( ident_path ) ) if not os.path.isabs(ident_path): return _fail( ret, 'Identity \'{0}\' is not an absolute path'.format( ident_path ) ) if https_user is not None and not isinstance(https_user, six.string_types): https_user = six.text_type(https_user) if https_pass is not None and not isinstance(https_pass, six.string_types): https_pass = six.text_type(https_pass) if os.path.isfile(target): return _fail( ret, 'Target \'{0}\' exists and is a regular file, cannot proceed' .format(target) ) try: desired_fetch_url = 
salt.utils.url.add_http_basic_auth( name, https_user, https_pass, https_only=True ) except ValueError as exc: return _fail(ret, exc.__str__()) redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url) # Check if onlyif or unless conditions match run_check_cmd_kwargs = {'runas': user} if 'shell' in __grains__: run_check_cmd_kwargs['shell'] = __grains__['shell'] cret = mod_run_check( run_check_cmd_kwargs, onlyif, unless ) if isinstance(cret, dict): ret.update(cret) return ret # Determine if supplied ref is a hash remote_rev_type = 'ref' if len(rev) <= 40 \ and all(x in string.hexdigits for x in rev): rev = rev.lower() remote_rev_type = 'hash' comments = [] hash_exists_locally = False local_commit_id = None gitdir = os.path.join(target, '.git') if os.path.isdir(gitdir) \ or __salt__['git.is_worktree'](target, user=user, password=password, output_encoding=output_encoding): # Target directory is a git repository or git worktree local_commit_id = _get_local_rev_and_branch( target, user, password, output_encoding=output_encoding)[0] if remote_rev_type is 'hash': try: __salt__['git.describe'](target, rev, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: hash_exists_locally = False else: # The rev is a hash and it exists locally so skip to checkout hash_exists_locally = True else: # Check that remote is present and set to correct url remotes = __salt__['git.remotes'](target, user=user, password=password, redact_auth=False, output_encoding=output_encoding) if remote in remotes and name in remotes[remote]['fetch']: pass else: # The fetch_url for the desired remote does not match the # specified URL (or the remote does not exist), so set the # remote URL. 
current_fetch_url = None if remote in remotes: current_fetch_url = remotes[remote]['fetch'] if __opts__['test']: return _neutral_test( ret, 'Remote {0} would be set to {1}'.format( remote, name ) ) __salt__['git.remote_set'](target, url=name, remote=remote, user=user, password=password, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) comments.append( 'Remote {0} updated from \'{1}\' to \'{2}\''.format( remote, current_fetch_url, name ) ) else: # Clone repository if os.path.isdir(target): target_contents = os.listdir(target) if force_clone: # Clone is required, and target directory exists, but the # ``force`` option is enabled, so we need to clear out its # contents to proceed. if __opts__['test']: return _neutral_test( ret, 'Target directory {0} exists. Since force_clone=True, ' 'the contents of {0} would be deleted, and {1} would ' 'be cloned into this directory.'.format(target, name) ) log.debug( 'Removing contents of %s to clone repository %s in its ' 'place (force_clone=True set in git.detached state)', target, name ) removal_errors = {} for target_object in target_contents: target_path = os.path.join(target, target_object) try: salt.utils.files.rm_rf(target_path) except OSError as exc: if exc.errno != errno.ENOENT: removal_errors[target_path] = exc if removal_errors: err_strings = [ ' {0}\n {1}'.format(k, v) for k, v in six.iteritems(removal_errors) ] return _fail( ret, 'Unable to remove\n{0}'.format('\n'.join(err_strings)), comments ) ret['changes']['forced clone'] = True elif target_contents: # Clone is required, but target dir exists and is non-empty. We # can't proceed. return _fail( ret, 'Target \'{0}\' exists, is non-empty and is not a git ' 'repository. 
Set the \'force_clone\' option to True to ' 'remove this directory\'s contents and proceed with ' 'cloning the remote repository'.format(target) ) log.debug('Target %s is not found, \'git clone\' is required', target) if __opts__['test']: return _neutral_test( ret, 'Repository {0} would be cloned to {1}'.format( name, target ) ) try: clone_opts = ['--no-checkout'] if remote != 'origin': clone_opts.extend(['--origin', remote]) __salt__['git.clone'](target, name, user=user, password=password, opts=clone_opts, identity=identity, https_user=https_user, https_pass=https_pass, saltenv=__env__, output_encoding=output_encoding) comments.append('{0} cloned to {1}'.format(name, target)) except Exception as exc: log.error( 'Unexpected exception in git.detached state', exc_info=True ) if isinstance(exc, CommandExecutionError): msg = _strip_exc(exc) else: msg = six.text_type(exc) return _fail(ret, msg, comments) # Repository exists and is ready for fetch/checkout refspecs = [ 'refs/heads/*:refs/remotes/{0}/*'.format(remote), '+refs/tags/*:refs/tags/*' ] if hash_exists_locally or fetch_remote is False: pass else: # Fetch refs from remote if __opts__['test']: return _neutral_test( ret, 'Repository remote {0} would be fetched'.format(remote) ) try: fetch_changes = __salt__['git.fetch']( target, remote=remote, force=True, refspecs=refspecs, user=user, password=password, identity=identity, saltenv=__env__, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Fetch failed' msg += ':\n\n' + six.text_type(exc) return _fail(ret, msg, comments) else: if fetch_changes: comments.append( 'Remote {0} was fetched, resulting in updated ' 'refs'.format(remote) ) # get refs and checkout checkout_commit_id = '' if remote_rev_type is 'hash': if __salt__['git.describe']( target, rev, user=user, password=password, output_encoding=output_encoding): checkout_commit_id = rev else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) else: try: all_remote_refs = 
__salt__['git.remote_refs']( target, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, ignore_retcode=False, output_encoding=output_encoding) if 'refs/remotes/'+remote+'/'+rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev] elif 'refs/tags/' + rev in all_remote_refs: checkout_commit_id = all_remote_refs['refs/tags/' + rev] else: return _fail( ret, 'Revision \'{0}\' does not exist'.format(rev) ) except CommandExecutionError as exc: return _fail( ret, 'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc)) ) if hard_reset: if __opts__['test']: return _neutral_test( ret, 'Hard reset to HEAD would be performed on {0}'.format(target) ) __salt__['git.reset']( target, opts=['--hard', 'HEAD'], user=user, password=password, output_encoding=output_encoding) comments.append( 'Repository was reset to HEAD before checking out revision' ) # TODO: implement clean function for git module and add clean flag if checkout_commit_id == local_commit_id: new_rev = None else: if __opts__['test']: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id} return _neutral_test( ret, 'Commit ID {0} would be checked out at {1}'.format( checkout_commit_id, target ) ) __salt__['git.checkout'](target, checkout_commit_id, force=force_checkout, user=user, password=password, output_encoding=output_encoding) comments.append( 'Commit ID {0} was checked out at {1}'.format( checkout_commit_id, target ) ) try: new_rev = __salt__['git.revision']( cwd=target, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: new_rev = None if submodules: __salt__['git.submodule'](target, 'update', opts=['--init', '--recursive'], user=user, password=password, identity=identity, output_encoding=output_encoding) comments.append( 'Submodules were updated' ) if new_rev is not None: ret['changes']['HEAD'] = {'old': local_commit_id, 'new': 
new_rev} else: comments.append("Already checked out at correct revision") msg = _format_comments(comments) log.info(msg) ret['comment'] = msg return ret def cloned(name, target=None, branch=None, user=None, password=None, identity=None, https_user=None, https_pass=None, output_encoding=None): ''' .. versionadded:: 2018.3.3,2019.2.0 Ensure that a repository has been cloned to the specified target directory. If not, clone that repository. No fetches will be performed once cloned. name Address of the remote repository target Name of the target directory where repository should be cloned branch Remote branch to check out. If unspecified, the default branch (i.e. the one to the remote HEAD points) will be checked out. .. note:: The local branch name will match the remote branch name. If the branch name is changed, then that branch will be checked out locally, but keep in mind that remote repository will not be fetched. If your use case requires that you keep the clone up to date with the remote repository, then consider using :py:func:`git.latest <salt.states.git.latest>`. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. identity Path to a private key to use for ssh URLs. Works the same way as in :py:func:`git.latest <salt.states.git.latest>`, see that state's documentation for more information. https_user HTTP Basic Auth username for HTTPS (only) clones https_pass HTTP Basic Auth password for HTTPS (only) clones output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. 
''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} if target is None: ret['comment'] = '\'target\' argument is required' return ret elif not isinstance(target, six.string_types): target = six.text_type(target) if not os.path.isabs(target): ret['comment'] = '\'target\' path must be absolute' return ret if branch is not None: if not isinstance(branch, six.string_types): branch = six.text_type(branch) if not branch: ret['comment'] = 'Invalid \'branch\' argument' return ret if not os.path.exists(target): need_clone = True else: try: __salt__['git.status'](target, user=user, password=password, output_encoding=output_encoding) except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: need_clone = False comments = [] def _clone_changes(ret): ret['changes']['new'] = name + ' => ' + target def _branch_changes(ret, old, new): ret['changes']['branch'] = {'old': old, 'new': new} if need_clone: if __opts__['test']: _clone_changes(ret) comment = '{0} would be cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) return _neutral_test(ret, comment) clone_opts = ['--branch', branch] if branch is not None else None try: __salt__['git.clone'](target, name, opts=clone_opts, user=user, password=password, identity=identity, https_user=https_user, https_pass=https_pass, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Clone failed: {0}'.format(_strip_exc(exc)) return _fail(ret, msg, comments) comments.append( '{0} cloned to {1}{2}'.format( name, target, ' with branch \'{0}\''.format(branch) if branch is not None else '' ) ) _clone_changes(ret) ret['comment'] = _format_comments(comments) ret['result'] = True return ret else: if branch is None: return _already_cloned(ret, target, branch, comments) else: current_branch = __salt__['git.current_branch']( target, user=user, password=password, output_encoding=output_encoding) if current_branch == branch: return 
_already_cloned(ret, target, branch, comments) else: if __opts__['test']: _branch_changes(ret, current_branch, branch) return _neutral_test( ret, 'Branch would be changed to \'{0}\''.format(branch)) try: __salt__['git.rev_parse']( target, rev=branch, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding) except CommandExecutionError: # Local head does not exist, so we need to check out a new # branch at the remote rev checkout_rev = '/'.join(('origin', branch)) checkout_opts = ['-b', branch] else: # Local head exists, so we just need to check it out checkout_rev = branch checkout_opts = None try: __salt__['git.checkout']( target, rev=checkout_rev, opts=checkout_opts, user=user, password=password, output_encoding=output_encoding) except CommandExecutionError as exc: msg = 'Failed to change branch to \'{0}\': {1}'.format(branch, exc) return _fail(ret, msg, comments) else: comments.append('Branch changed to \'{0}\''.format(branch)) _branch_changes(ret, current_branch, branch) ret['comment'] = _format_comments(comments) ret['result'] = True return ret def config_unset(name, value_regex=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): r''' .. versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. 
all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'No matching keys are set'} # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. 
kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) all_ = kwargs.pop('all', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value_regex is not None: if not isinstance(value_regex, six.string_types): value_regex = six.text_type(value_regex) # Ensure that the key regex matches the full key name key = '^' + name.lstrip('^').rstrip('$') + '$' # Get matching keys/values pre_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if not pre_matches: # No changes need to be made return ret # Perform sanity check on the matches. We can't proceed if the value_regex # matches more than one value in a given key, and 'all' is not set to True if not all_: greedy_matches = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(pre_matches) if len(y) > 1] if greedy_matches: if value_regex is not None: return _fail( ret, 'Multiple values are matched by value_regex for the ' 'following keys (set \'all\' to True to force removal): ' '{0}'.format('; '.join(greedy_matches)) ) else: return _fail( ret, 'Multivar(s) matched by the key expression (set \'all\' ' 'to True to force removal): {0}'.format( '; '.join(greedy_matches) ) ) if __opts__['test']: ret['changes'] = pre_matches return _neutral_test( ret, '{0} key(s) would have value(s) unset'.format(len(pre_matches)) ) if value_regex is None: pre = pre_matches else: # Get all keys matching the key expression, so we can accurately report # on changes made. 
pre = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) failed = [] # Unset the specified value(s). There is no unset for regexes so loop # through the pre_matches dict and unset each matching key individually. for key_name in pre_matches: try: __salt__['git.config_unset']( cwd=repo, key=name, value_regex=value_regex, all=all_, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: msg = 'Failed to unset \'{0}\''.format(key_name) if value_regex is not None: msg += ' using value_regex \'{1}\'' msg += ': ' + _strip_exc(exc) log.error(msg) failed.append(key_name) if failed: return _fail( ret, 'Error(s) occurred unsetting values for the following keys (see ' 'the minion log for details): {0}'.format(', '.join(failed)) ) post = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=None, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) for key_name in pre: if key_name not in post: ret['changes'][key_name] = pre[key_name] unset = [x for x in pre[key_name] if x not in post[key_name]] if unset: ret['changes'][key_name] = unset if value_regex is None: post_matches = post else: post_matches = __salt__['git.config_get_regexp']( cwd=repo, key=key, value_regex=value_regex, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'global': global_} ) if post_matches: failed = ['{0} ({1})'.format(x, ', '.join(y)) for x, y in six.iteritems(post_matches)] return _fail( ret, 'Failed to unset value(s): {0}'.format('; '.join(failed)) ) ret['comment'] = 'Value(s) successfully unset' return ret def config_set(name, value=None, multivar=None, repo=None, user=None, password=None, output_encoding=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. 
versionchanged:: 2015.8.0 Renamed from ``git.config`` to ``git.config_set``. For earlier versions, use ``git.config``. Ensure that a config value is set to the desired value(s) name Name of the git config value to set value Set a single value for the config item multivar Set multiple values for the config item .. note:: The order matters here, if the same parameters are set but in a different order, they will be removed and replaced in the order specified. .. versionadded:: 2015.8.0 repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, the commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Local Config Example:** .. code-block:: yaml # Single value mylocalrepo: git.config_set: - name: user.email - value: foo@bar.net - repo: /path/to/repo # Multiple values mylocalrepo: git.config_set: - name: mysection.myattribute - multivar: - foo - bar - baz - repo: /path/to/repo **Global Config Example (User ``foo``):** .. code-block:: yaml mylocalrepo: git.config_set: - name: user.name - value: Foo Bar - user: foo - global: True ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if value is not None and multivar is not None: return _fail( ret, 'Only one of \'value\' and \'multivar\' is permitted' ) # Sanitize kwargs and make sure that no invalid ones were passed. 
This # allows us to accept 'global' as an argument to this function without # shadowing global(), while also not allowing unwanted arguments to be # passed. kwargs = salt.utils.args.clean_kwargs(**kwargs) global_ = kwargs.pop('global', False) if kwargs: return _fail( ret, salt.utils.args.invalid_kwargs(kwargs, raise_exc=False) ) if not global_ and not repo: return _fail( ret, 'Non-global config options require the \'repo\' argument to be ' 'set' ) if not isinstance(name, six.string_types): name = six.text_type(name) if value is not None: if not isinstance(value, six.string_types): value = six.text_type(value) value_comment = '\'' + value + '\'' desired = [value] if multivar is not None: if not isinstance(multivar, list): try: multivar = multivar.split(',') except AttributeError: multivar = six.text_type(multivar).split(',') else: new_multivar = [] for item in multivar: if isinstance(item, six.string_types): new_multivar.append(item) else: new_multivar.append(six.text_type(item)) multivar = new_multivar value_comment = multivar desired = multivar # Get current value pre = __salt__['git.config_get']( cwd=repo, key=name, user=user, password=password, ignore_retcode=True, output_encoding=output_encoding, **{'all': True, 'global': global_} ) if desired == pre: ret['comment'] = '{0}\'{1}\' is already set to {2}'.format( 'Global key ' if global_ else '', name, value_comment ) return ret if __opts__['test']: ret['changes'] = {'old': pre, 'new': desired} msg = '{0}\'{1}\' would be {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return _neutral_test(ret, msg) try: # Set/update config value post = __salt__['git.config_set']( cwd=repo, key=name, value=value, multivar=multivar, user=user, password=password, output_encoding=output_encoding, **{'global': global_} ) except CommandExecutionError as exc: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}: {3}'.format( 'global key ' if global_ else '', name, 
value_comment, _strip_exc(exc) ) ) if pre != post: ret['changes'][name] = {'old': pre, 'new': post} if post != desired: return _fail( ret, 'Failed to set {0}\'{1}\' to {2}'.format( 'global key ' if global_ else '', name, value_comment ) ) ret['comment'] = '{0}\'{1}\' was {2} {3}'.format( 'Global key ' if global_ else '', name, 'added as' if pre is None else 'set to', value_comment ) return ret
saltstack/salt
salt/modules/pushbullet.py
push_note
python
def push_note(device=None, title=None, body=None): ''' Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body." ''' spb = _SaltPushbullet(device) res = spb.push_note(title, body) return res
Pushing a text note. :param device: Pushbullet target device :param title: Note title :param body: Note body :return: Boolean if message was sent successfully. CLI Example: .. code-block:: bash salt "*" pushbullet.push_note device="Chrome" title="Example title" body="Example body."
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pushbullet.py#L65-L84
[ "def push_note(self, title, body):\n push = self.pb.push_note(title, body, device=self.target)\n return push\n" ]
# -*- coding: utf-8 -*- ''' Module for sending messages to Pushbullet (https://www.pushbullet.com) .. versionadded:: 2015.8.0 Requires an ``api_key`` in ``/etc/salt/minion``: .. code-block: yaml pushbullet: api_key: 'ABC123abc123ABC123abc123ABC123ab' For example: .. code-block:: yaml pushbullet: device: "Chrome" title: "Example push message" body: "Message body." ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging try: import pushbullet HAS_PUSHBULLET = True except ImportError: HAS_PUSHBULLET = False log = logging.getLogger(__name__) def __virtual__(): if not HAS_PUSHBULLET: return (False, 'Missing pushbullet library.') if not __salt__['config.get']('pushbullet.api_key') and \ not __salt__['config.get']('pushbullet:api_key'): return (False, 'Pushbullet API Key Unavailable, not loading.') return True class _SaltPushbullet(object): def __init__(self, device_name): api_key = __salt__['config.get']('pushbullet.api_key') or \ __salt__['config.get']('pushbullet:api_key') self.pb = pushbullet.Pushbullet(api_key) self.target = self._find_device_by_name(device_name) def push_note(self, title, body): push = self.pb.push_note(title, body, device=self.target) return push def _find_device_by_name(self, name): for dev in self.pb.devices: if dev.nickname == name: return dev
saltstack/salt
salt/modules/travisci.py
verify_webhook
python
def verify_webhook(signature, body): ''' Verify the webhook signature from travisci signature The signature header from the webhook header body The full payload body from the webhook post .. note:: The body needs to be the urlencoded version of the body. CLI Example: .. code-block:: bash salt '*' travisci.verify_webhook 'M6NucCX5722bxisQs7e...' 'payload=%7B%22id%22%3A183791261%2C%22repository...' ''' # get public key setup public_key = __utils__['http.query']('https://api.travis-ci.org/config')['config']['notifications']['webhook']['public_key'] pkey_public_key = OpenSSL.crypto.load_publickey(OpenSSL.crypto.FILETYPE_PEM, public_key) certificate = OpenSSL.crypto.X509() certificate.set_pubkey(pkey_public_key) # decode signature signature = base64.b64decode(signature) # parse the urlencoded payload from travis payload = salt.utils.json.loads(parse_qs(body)['payload'][0]) try: OpenSSL.crypto.verify(certificate, signature, payload, six.text_type('sha1')) except OpenSSL.crypto.Error: return False return True
Verify the webhook signature from travisci signature The signature header from the webhook header body The full payload body from the webhook post .. note:: The body needs to be the urlencoded version of the body. CLI Example: .. code-block:: bash salt '*' travisci.verify_webhook 'M6NucCX5722bxisQs7e...' 'payload=%7B%22id%22%3A183791261%2C%22repository...'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/travisci.py#L42-L77
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n" ]
# -*- coding: utf-8 -*- ''' Commands for working with travisci. :depends: pyOpenSSL >= 16.0.0 ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function import base64 try: import OpenSSL import OpenSSL.crypto HAS_OPENSSL = True except ImportError: HAS_OPENSSL = False # Import Salt libraries import salt.utils.json from salt.utils.versions import LooseVersion as _LooseVersion from salt.ext.six.moves.urllib.parse import parse_qs # pylint: disable=import-error,no-name-in-module # Import 3rd party libraries from salt.ext import six OPENSSL_MIN_VER = '16.0.0' __virtualname__ = 'travisci' def __virtual__(): if HAS_OPENSSL is False: return (False, 'The travisci module was unable to be loaded: Install pyOpenssl >= {0}'.format(OPENSSL_MIN_VER)) cur_version = _LooseVersion(OpenSSL.__version__) min_version = _LooseVersion(OPENSSL_MIN_VER) if cur_version < min_version: return (False, 'The travisci module was unable to be loaded: Install pyOpenssl >= {0}'.format(OPENSSL_MIN_VER)) return __virtualname__
saltstack/salt
salt/states/network.py
managed
python
def managed(name, type, enabled=True, **kwargs): ''' Ensure that the named interface is configured properly. name The name of the interface to manage type Type of interface and configuration. enabled Designates the state of this interface. kwargs The IP parameters for this interface. ''' # For this function we are purposefully overwriting a bif # to enhance the user experience. This does not look like # it will cause a problem. Just giving a heads up in case # it does create a problem. ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Interface {0} is up to date.'.format(name), } if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # set ranged status apply_ranged_setting = False # Build interface try: old = __salt__['ip.get_interface'](name) new = __salt__['ip.build_interface'](name, type, enabled, **kwargs) if kwargs['test']: if old == new: pass if not old and new: ret['result'] = None ret['comment'] = 'Interface {0} is set to be ' \ 'added.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Interface {0} is set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) else: if not old and new: ret['comment'] = 'Interface {0} ' \ 'added.'.format(name) ret['changes']['interface'] = 'Added network interface.' 
apply_ranged_setting = True elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['comment'] = 'Interface {0} ' \ 'updated.'.format(name) ret['changes']['interface'] = '\n'.join(diff) apply_ranged_setting = True except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Debian based system can have a type of source # in the interfaces file, we don't ifup or ifdown it if type == 'source': return ret # Setup up bond modprobe script if required if type == 'bond': try: old = __salt__['ip.get_bond'](name) new = __salt__['ip.build_bond'](name, **kwargs) if kwargs['test']: if not old and new: ret['result'] = None ret['comment'] = 'Bond interface {0} is set to be ' \ 'added.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Bond interface {0} is set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) else: if not old and new: ret['comment'] = 'Bond interface {0} ' \ 'added.'.format(name) ret['changes']['bond'] = 'Added bond {0}.'.format(name) apply_ranged_setting = True elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['comment'] = 'Bond interface {0} ' \ 'updated.'.format(name) ret['changes']['bond'] = '\n'.join(diff) apply_ranged_setting = True except AttributeError as error: #TODO Add a way of reversing the interface changes. 
ret['result'] = False ret['comment'] = six.text_type(error) return ret if kwargs['test']: return ret # For Redhat/Centos ranged network if "range" in name: if apply_ranged_setting: try: ret['result'] = __salt__['service.restart']('network') ret['comment'] = "network restarted for change of ranged interfaces" return ret except Exception as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret ret['result'] = True ret['comment'] = "no change, passing it" return ret # Bring up/shutdown interface try: # Get Interface current status interfaces = salt.utils.network.interfaces() interface_status = False if name in interfaces: interface_status = interfaces[name].get('up') else: for iface in interfaces: if 'secondary' in interfaces[iface]: for second in interfaces[iface]['secondary']: if second.get('label', '') == name: interface_status = True if enabled: if 'noifupdown' not in kwargs: if interface_status: if ret['changes']: # Interface should restart to validate if it's up __salt__['ip.down'](name, type) __salt__['ip.up'](name, type) ret['changes']['status'] = 'Interface {0} restart to validate'.format(name) else: __salt__['ip.up'](name, type) ret['changes']['status'] = 'Interface {0} is up'.format(name) else: if 'noifupdown' not in kwargs: if interface_status: __salt__['ip.down'](name, type) ret['changes']['status'] = 'Interface {0} down'.format(name) except Exception as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Try to enslave bonding interfaces after master was created if type == 'bond' and 'noifupdown' not in kwargs: if 'slaves' in kwargs and kwargs['slaves']: # Check that there are new slaves for this master present_slaves = __salt__['cmd.run']( ['cat', '/sys/class/net/{0}/bonding/slaves'.format(name)]).split() desired_slaves = kwargs['slaves'].split() missing_slaves = set(desired_slaves) - set(present_slaves) # Enslave only slaves missing in master if missing_slaves: ifenslave_path = 
__salt__['cmd.run'](['which', 'ifenslave']).strip() if ifenslave_path: log.info("Adding slaves '%s' to the master %s", ' '.join(missing_slaves), name) cmd = [ifenslave_path, name] + list(missing_slaves) __salt__['cmd.run'](cmd, python_shell=False) else: log.error("Command 'ifenslave' not found") ret['changes']['enslave'] = ( "Added slaves '{0}' to master '{1}'" .format(' '.join(missing_slaves), name)) else: log.info("All slaves '%s' are already added to the master %s" ", no actions required", ' '.join(missing_slaves), name) if enabled and interface_status: # Interface was restarted, return return ret # TODO: create saltutil.refresh_grains that fires events to the minion daemon grains_info = salt.loader.grains(__opts__, True) __grains__.update(grains_info) __salt__['saltutil.refresh_modules']() return ret
Ensure that the named interface is configured properly. name The name of the interface to manage type Type of interface and configuration. enabled Designates the state of this interface. kwargs The IP parameters for this interface.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/network.py#L359-L551
[ "def interfaces():\n '''\n Return a dictionary of information about all the interfaces on the minion\n '''\n if salt.utils.platform.is_windows():\n return win_interfaces()\n elif salt.utils.platform.is_netbsd():\n return netbsd_interfaces()\n else:\n return linux_interfaces()\n" ]
# -*- coding: utf-8 -*- ''' Configuration of network interfaces =================================== The network module is used to create and manage network settings, interfaces can be set as either managed or ignored. By default all interfaces are ignored unless specified. .. note:: RedHat-based systems (RHEL, CentOS, Scientific, etc.) have been supported since version 2014.1.0. Debian-based systems (Debian, Ubuntu, etc.) have been supported since version 2017.7.0. The following options are not supported: ipaddr_start, and ipaddr_end. Other platforms are not yet supported. .. note:: On Debian-based systems, networking configuration can be specified in `/etc/network/interfaces` or via included files such as (by default) `/etc/network/interfaces.d/*`. This can be problematic for configuration management. It is recommended to use either `file.managed` *or* `network.managed`. If using `network.managed`, it can be useful to ensure `interfaces.d/` is empty. This can be done using: /etc/network/interfaces.d: file.directory: - clean: True .. 
code-block:: yaml system: network.system: - enabled: True - hostname: server1.example.com - gateway: 192.168.0.1 - gatewaydev: eth0 - nozeroconf: True - nisdomain: example.com - require_reboot: True eth0: network.managed: - enabled: True - type: eth - proto: static - ipaddr: 10.1.0.7 - netmask: 255.255.255.0 - gateway: 10.1.0.1 - enable_ipv6: true - ipv6proto: static - ipv6ipaddrs: - 2001:db8:dead:beef::3/64 - 2001:db8:dead:beef::7/64 - ipv6gateway: 2001:db8:dead:beef::1 - ipv6netmask: 64 - dns: - 8.8.8.8 - 8.8.4.4 eth0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - mtu: 9000 bond0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - mtu: 9000 eth1.0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - vlan: True - mtu: 9000 bond0.1-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - vlan: True - mtu: 9000 .. note:: add support of ranged interfaces (vlan, bond and eth) for redhat system, Important:type must be eth. 
routes: network.routes: - name: eth0 - routes: - name: secure_network ipaddr: 10.2.0.0 netmask: 255.255.255.0 gateway: 10.1.0.3 - name: HQ_network ipaddr: 10.100.0.0 netmask: 255.255.0.0 gateway: 10.1.0.10 eth2: network.managed: - enabled: True - type: slave - master: bond0 eth3: network.managed: - enabled: True - type: slave - master: bond0 eth4: network.managed: - enabled: True - type: eth - proto: dhcp - bridge: br0 eth5: network.managed: - enabled: True - type: eth - proto: dhcp - noifupdown: True # Do not restart the interface # you need to reboot/reconfigure manualy bond0: network.managed: - type: bond - ipaddr: 10.1.0.1 - netmask: 255.255.255.0 - mode: gre - proto: static - dns: - 8.8.8.8 - 8.8.4.4 - enabled: False - slaves: eth2 eth3 - require: - network: eth2 - network: eth3 - miimon: 100 - arp_interval: 250 - downdelay: 200 - lacp_rate: fast - max_bonds: 1 - updelay: 0 - use_carrier: on - hashing-algorithm: layer2 - mtu: 9000 - autoneg: on - speed: 1000 - duplex: full - rx: on - tx: off - sg: on - tso: off - ufo: off - gso: off - gro: off - lro: off bond0.2: network.managed: - type: vlan - ipaddr: 10.1.0.2 - use: - network: bond0 - require: - network: bond0 bond0.3: network.managed: - type: vlan - ipaddr: 10.1.0.3 - use: - network: bond0 - require: - network: bond0 bond0.10: network.managed: - type: vlan - ipaddr: 10.1.0.4 - use: - network: bond0 - require: - network: bond0 bond0.12: network.managed: - type: vlan - ipaddr: 10.1.0.5 - use: - network: bond0 - require: - network: bond0 br0: network.managed: - enabled: True - type: bridge - proto: dhcp - bridge: br0 - delay: 0 - ports: eth4 - bypassfirewall: True - use: - network: eth4 - require: - network: eth4 eth6: network.managed: - type: eth - noifupdown: True # IPv4 - proto: static - ipaddr: 192.168.4.9 - netmask: 255.255.255.0 - gateway: 192.168.4.1 - enable_ipv6: True # IPv6 - ipv6proto: static - ipv6ipaddr: 2001:db8:dead:c0::3 - ipv6netmask: 64 - ipv6gateway: 2001:db8:dead:c0::1 # override shared; 
makes those options v4-only - ipv6ttl: 15 # Shared - mtu: 1480 - ttl: 18 - dns: - 8.8.8.8 - 8.8.4.4 eth7: - type: eth - proto: static - ipaddr: 10.1.0.7 - netmask: 255.255.255.0 - gateway: 10.1.0.1 - enable_ipv6: True - ipv6proto: static - ipv6ipaddr: 2001:db8:dead:beef::3 - ipv6netmask: 64 - ipv6gateway: 2001:db8:dead:beef::1 - noifupdown: True eth8: network.managed: - enabled: True - type: eth - proto: static - enable_ipv6: true - ipv6proto: static - ipv6ipaddrs: - 2001:db8:dead:beef::3/64 - 2001:db8:dead:beef::7/64 - ipv6gateway: 2001:db8:dead:beef::1 - ipv6netmask: 64 - dns: - 8.8.8.8 - 8.8.4.4 system: network.system: - enabled: True - hostname: server1.example.com - gateway: 192.168.0.1 - gatewaydev: eth0 - nozeroconf: True - nisdomain: example.com - require_reboot: True - apply_hostname: True lo: network.managed: - name: lo - type: eth - proto: loopback - onboot: yes - userctl: no - ipv6_autoconf: no - enable_ipv6: true .. note:: Apply changes to hostname immediately. .. versionadded:: 2015.5.0 system: network.system: - hostname: server2.example.com - apply_hostname: True - retain_settings: True .. note:: Use `retain_settings` to retain current network settings that are not otherwise specified in the state. Particularly useful if only setting the hostname. Default behavior is to delete unspecified network settings. .. versionadded:: 2016.11.0 .. note:: When managing bridged interfaces on a Debian or Ubuntu based system, the ports argument is required. Red Hat systems will ignore the argument. ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import difflib # Import Salt libs import salt.utils.network import salt.utils.platform import salt.loader # Import 3rd party libs from salt.ext import six # Set up logging import logging log = logging.getLogger(__name__) def __virtual__(): ''' Confine this module to non-Windows systems with the required execution module available. 
''' if not salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__: return True return False def routes(name, **kwargs): ''' Manage network interface static routes. name Interface name to apply the route to. kwargs Named routes ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Interface {0} routes are up to date.'.format(name), } apply_routes = False if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # Build interface routes try: old = __salt__['ip.get_routes'](name) new = __salt__['ip.build_routes'](name, **kwargs) if kwargs['test']: if old == new: return ret if not old and new: ret['result'] = None ret['comment'] = 'Interface {0} routes are set to be added.'.format(name) return ret elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Interface {0} routes are set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) return ret if not old and new: apply_routes = True ret['comment'] = 'Interface {0} routes added.'.format(name) ret['changes']['network_routes'] = 'Added interface {0} routes.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') apply_routes = True ret['comment'] = 'Interface {0} routes updated.'.format(name) ret['changes']['network_routes'] = '\n'.join(diff) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Apply interface routes if apply_routes: try: __salt__['ip.apply_network_settings'](**kwargs) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret return ret def system(name, **kwargs): ''' Ensure that global network settings are configured properly. name Custom name to represent this configuration change. kwargs The global parameters for the system. 
''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Global network settings are up to date.', } apply_net_settings = False kwargs['test'] = __opts__['test'] # Build global network settings try: old = __salt__['ip.get_network_settings']() new = __salt__['ip.build_network_settings'](**kwargs) if __opts__['test']: if old == new: return ret if not old and new: ret['result'] = None ret['comment'] = 'Global network settings are set to be added.' return ret elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Global network settings are set to be ' \ 'updated:\n{0}'.format('\n'.join(diff)) return ret if not old and new: apply_net_settings = True ret['changes']['network_settings'] = 'Added global network settings.' elif old != new: diff = difflib.unified_diff(old, new, lineterm='') apply_net_settings = True ret['changes']['network_settings'] = '\n'.join(diff) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret except KeyError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Apply global network settings if apply_net_settings: try: __salt__['ip.apply_network_settings'](**kwargs) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret return ret
saltstack/salt
salt/states/network.py
routes
python
def routes(name, **kwargs):
    '''
    Manage network interface static routes.

    name
        Interface name to apply the route to.

    kwargs
        Named routes
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Interface {0} routes are up to date.'.format(name)}
    # Honour an explicit test=... argument; otherwise fall back to the
    # master/minion test setting.
    kwargs.setdefault('test', __opts__.get('test', False))
    needs_apply = False

    # Render the desired routes and compare them to what is on disk.
    try:
        current = __salt__['ip.get_routes'](name)
        desired = __salt__['ip.build_routes'](name, **kwargs)
        if kwargs['test']:
            # Dry run: report the pending change without touching anything.
            if current == desired:
                return ret
            ret['result'] = None
            if not current and desired:
                ret['comment'] = 'Interface {0} routes are set to be added.'.format(name)
            else:
                changes = difflib.unified_diff(current, desired, lineterm='')
                ret['comment'] = ('Interface {0} routes are set to be '
                                  'updated:\n{1}'.format(name, '\n'.join(changes)))
            return ret
        if not current and desired:
            needs_apply = True
            ret['comment'] = 'Interface {0} routes added.'.format(name)
            ret['changes']['network_routes'] = 'Added interface {0} routes.'.format(name)
        elif current != desired:
            changes = difflib.unified_diff(current, desired, lineterm='')
            needs_apply = True
            ret['comment'] = 'Interface {0} routes updated.'.format(name)
            ret['changes']['network_routes'] = '\n'.join(changes)
    except AttributeError as error:
        ret['result'] = False
        ret['comment'] = six.text_type(error)
        return ret

    # Push the new routes out via the ip execution module.
    if needs_apply:
        try:
            __salt__['ip.apply_network_settings'](**kwargs)
        except AttributeError as error:
            ret['result'] = False
            ret['comment'] = six.text_type(error)
            return ret

    return ret
Manage network interface static routes. name Interface name to apply the route to. kwargs Named routes
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/network.py#L554-L614
null
# -*- coding: utf-8 -*- ''' Configuration of network interfaces =================================== The network module is used to create and manage network settings, interfaces can be set as either managed or ignored. By default all interfaces are ignored unless specified. .. note:: RedHat-based systems (RHEL, CentOS, Scientific, etc.) have been supported since version 2014.1.0. Debian-based systems (Debian, Ubuntu, etc.) have been supported since version 2017.7.0. The following options are not supported: ipaddr_start, and ipaddr_end. Other platforms are not yet supported. .. note:: On Debian-based systems, networking configuration can be specified in `/etc/network/interfaces` or via included files such as (by default) `/etc/network/interfaces.d/*`. This can be problematic for configuration management. It is recommended to use either `file.managed` *or* `network.managed`. If using `network.managed`, it can be useful to ensure `interfaces.d/` is empty. This can be done using: /etc/network/interfaces.d: file.directory: - clean: True .. 
code-block:: yaml system: network.system: - enabled: True - hostname: server1.example.com - gateway: 192.168.0.1 - gatewaydev: eth0 - nozeroconf: True - nisdomain: example.com - require_reboot: True eth0: network.managed: - enabled: True - type: eth - proto: static - ipaddr: 10.1.0.7 - netmask: 255.255.255.0 - gateway: 10.1.0.1 - enable_ipv6: true - ipv6proto: static - ipv6ipaddrs: - 2001:db8:dead:beef::3/64 - 2001:db8:dead:beef::7/64 - ipv6gateway: 2001:db8:dead:beef::1 - ipv6netmask: 64 - dns: - 8.8.8.8 - 8.8.4.4 eth0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - mtu: 9000 bond0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - mtu: 9000 eth1.0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - vlan: True - mtu: 9000 bond0.1-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - vlan: True - mtu: 9000 .. note:: add support of ranged interfaces (vlan, bond and eth) for redhat system, Important:type must be eth. 
routes: network.routes: - name: eth0 - routes: - name: secure_network ipaddr: 10.2.0.0 netmask: 255.255.255.0 gateway: 10.1.0.3 - name: HQ_network ipaddr: 10.100.0.0 netmask: 255.255.0.0 gateway: 10.1.0.10 eth2: network.managed: - enabled: True - type: slave - master: bond0 eth3: network.managed: - enabled: True - type: slave - master: bond0 eth4: network.managed: - enabled: True - type: eth - proto: dhcp - bridge: br0 eth5: network.managed: - enabled: True - type: eth - proto: dhcp - noifupdown: True # Do not restart the interface # you need to reboot/reconfigure manualy bond0: network.managed: - type: bond - ipaddr: 10.1.0.1 - netmask: 255.255.255.0 - mode: gre - proto: static - dns: - 8.8.8.8 - 8.8.4.4 - enabled: False - slaves: eth2 eth3 - require: - network: eth2 - network: eth3 - miimon: 100 - arp_interval: 250 - downdelay: 200 - lacp_rate: fast - max_bonds: 1 - updelay: 0 - use_carrier: on - hashing-algorithm: layer2 - mtu: 9000 - autoneg: on - speed: 1000 - duplex: full - rx: on - tx: off - sg: on - tso: off - ufo: off - gso: off - gro: off - lro: off bond0.2: network.managed: - type: vlan - ipaddr: 10.1.0.2 - use: - network: bond0 - require: - network: bond0 bond0.3: network.managed: - type: vlan - ipaddr: 10.1.0.3 - use: - network: bond0 - require: - network: bond0 bond0.10: network.managed: - type: vlan - ipaddr: 10.1.0.4 - use: - network: bond0 - require: - network: bond0 bond0.12: network.managed: - type: vlan - ipaddr: 10.1.0.5 - use: - network: bond0 - require: - network: bond0 br0: network.managed: - enabled: True - type: bridge - proto: dhcp - bridge: br0 - delay: 0 - ports: eth4 - bypassfirewall: True - use: - network: eth4 - require: - network: eth4 eth6: network.managed: - type: eth - noifupdown: True # IPv4 - proto: static - ipaddr: 192.168.4.9 - netmask: 255.255.255.0 - gateway: 192.168.4.1 - enable_ipv6: True # IPv6 - ipv6proto: static - ipv6ipaddr: 2001:db8:dead:c0::3 - ipv6netmask: 64 - ipv6gateway: 2001:db8:dead:c0::1 # override shared; 
makes those options v4-only - ipv6ttl: 15 # Shared - mtu: 1480 - ttl: 18 - dns: - 8.8.8.8 - 8.8.4.4 eth7: - type: eth - proto: static - ipaddr: 10.1.0.7 - netmask: 255.255.255.0 - gateway: 10.1.0.1 - enable_ipv6: True - ipv6proto: static - ipv6ipaddr: 2001:db8:dead:beef::3 - ipv6netmask: 64 - ipv6gateway: 2001:db8:dead:beef::1 - noifupdown: True eth8: network.managed: - enabled: True - type: eth - proto: static - enable_ipv6: true - ipv6proto: static - ipv6ipaddrs: - 2001:db8:dead:beef::3/64 - 2001:db8:dead:beef::7/64 - ipv6gateway: 2001:db8:dead:beef::1 - ipv6netmask: 64 - dns: - 8.8.8.8 - 8.8.4.4 system: network.system: - enabled: True - hostname: server1.example.com - gateway: 192.168.0.1 - gatewaydev: eth0 - nozeroconf: True - nisdomain: example.com - require_reboot: True - apply_hostname: True lo: network.managed: - name: lo - type: eth - proto: loopback - onboot: yes - userctl: no - ipv6_autoconf: no - enable_ipv6: true .. note:: Apply changes to hostname immediately. .. versionadded:: 2015.5.0 system: network.system: - hostname: server2.example.com - apply_hostname: True - retain_settings: True .. note:: Use `retain_settings` to retain current network settings that are not otherwise specified in the state. Particularly useful if only setting the hostname. Default behavior is to delete unspecified network settings. .. versionadded:: 2016.11.0 .. note:: When managing bridged interfaces on a Debian or Ubuntu based system, the ports argument is required. Red Hat systems will ignore the argument. ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import difflib # Import Salt libs import salt.utils.network import salt.utils.platform import salt.loader # Import 3rd party libs from salt.ext import six # Set up logging import logging log = logging.getLogger(__name__) def __virtual__(): ''' Confine this module to non-Windows systems with the required execution module available. 
''' if not salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__: return True return False def managed(name, type, enabled=True, **kwargs): ''' Ensure that the named interface is configured properly. name The name of the interface to manage type Type of interface and configuration. enabled Designates the state of this interface. kwargs The IP parameters for this interface. ''' # For this function we are purposefully overwriting a bif # to enhance the user experience. This does not look like # it will cause a problem. Just giving a heads up in case # it does create a problem. ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Interface {0} is up to date.'.format(name), } if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # set ranged status apply_ranged_setting = False # Build interface try: old = __salt__['ip.get_interface'](name) new = __salt__['ip.build_interface'](name, type, enabled, **kwargs) if kwargs['test']: if old == new: pass if not old and new: ret['result'] = None ret['comment'] = 'Interface {0} is set to be ' \ 'added.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Interface {0} is set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) else: if not old and new: ret['comment'] = 'Interface {0} ' \ 'added.'.format(name) ret['changes']['interface'] = 'Added network interface.' 
apply_ranged_setting = True elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['comment'] = 'Interface {0} ' \ 'updated.'.format(name) ret['changes']['interface'] = '\n'.join(diff) apply_ranged_setting = True except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Debian based system can have a type of source # in the interfaces file, we don't ifup or ifdown it if type == 'source': return ret # Setup up bond modprobe script if required if type == 'bond': try: old = __salt__['ip.get_bond'](name) new = __salt__['ip.build_bond'](name, **kwargs) if kwargs['test']: if not old and new: ret['result'] = None ret['comment'] = 'Bond interface {0} is set to be ' \ 'added.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Bond interface {0} is set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) else: if not old and new: ret['comment'] = 'Bond interface {0} ' \ 'added.'.format(name) ret['changes']['bond'] = 'Added bond {0}.'.format(name) apply_ranged_setting = True elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['comment'] = 'Bond interface {0} ' \ 'updated.'.format(name) ret['changes']['bond'] = '\n'.join(diff) apply_ranged_setting = True except AttributeError as error: #TODO Add a way of reversing the interface changes. 
ret['result'] = False ret['comment'] = six.text_type(error) return ret if kwargs['test']: return ret # For Redhat/Centos ranged network if "range" in name: if apply_ranged_setting: try: ret['result'] = __salt__['service.restart']('network') ret['comment'] = "network restarted for change of ranged interfaces" return ret except Exception as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret ret['result'] = True ret['comment'] = "no change, passing it" return ret # Bring up/shutdown interface try: # Get Interface current status interfaces = salt.utils.network.interfaces() interface_status = False if name in interfaces: interface_status = interfaces[name].get('up') else: for iface in interfaces: if 'secondary' in interfaces[iface]: for second in interfaces[iface]['secondary']: if second.get('label', '') == name: interface_status = True if enabled: if 'noifupdown' not in kwargs: if interface_status: if ret['changes']: # Interface should restart to validate if it's up __salt__['ip.down'](name, type) __salt__['ip.up'](name, type) ret['changes']['status'] = 'Interface {0} restart to validate'.format(name) else: __salt__['ip.up'](name, type) ret['changes']['status'] = 'Interface {0} is up'.format(name) else: if 'noifupdown' not in kwargs: if interface_status: __salt__['ip.down'](name, type) ret['changes']['status'] = 'Interface {0} down'.format(name) except Exception as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Try to enslave bonding interfaces after master was created if type == 'bond' and 'noifupdown' not in kwargs: if 'slaves' in kwargs and kwargs['slaves']: # Check that there are new slaves for this master present_slaves = __salt__['cmd.run']( ['cat', '/sys/class/net/{0}/bonding/slaves'.format(name)]).split() desired_slaves = kwargs['slaves'].split() missing_slaves = set(desired_slaves) - set(present_slaves) # Enslave only slaves missing in master if missing_slaves: ifenslave_path = 
__salt__['cmd.run'](['which', 'ifenslave']).strip() if ifenslave_path: log.info("Adding slaves '%s' to the master %s", ' '.join(missing_slaves), name) cmd = [ifenslave_path, name] + list(missing_slaves) __salt__['cmd.run'](cmd, python_shell=False) else: log.error("Command 'ifenslave' not found") ret['changes']['enslave'] = ( "Added slaves '{0}' to master '{1}'" .format(' '.join(missing_slaves), name)) else: log.info("All slaves '%s' are already added to the master %s" ", no actions required", ' '.join(missing_slaves), name) if enabled and interface_status: # Interface was restarted, return return ret # TODO: create saltutil.refresh_grains that fires events to the minion daemon grains_info = salt.loader.grains(__opts__, True) __grains__.update(grains_info) __salt__['saltutil.refresh_modules']() return ret def system(name, **kwargs): ''' Ensure that global network settings are configured properly. name Custom name to represent this configuration change. kwargs The global parameters for the system. ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Global network settings are up to date.', } apply_net_settings = False kwargs['test'] = __opts__['test'] # Build global network settings try: old = __salt__['ip.get_network_settings']() new = __salt__['ip.build_network_settings'](**kwargs) if __opts__['test']: if old == new: return ret if not old and new: ret['result'] = None ret['comment'] = 'Global network settings are set to be added.' return ret elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Global network settings are set to be ' \ 'updated:\n{0}'.format('\n'.join(diff)) return ret if not old and new: apply_net_settings = True ret['changes']['network_settings'] = 'Added global network settings.' 
elif old != new: diff = difflib.unified_diff(old, new, lineterm='') apply_net_settings = True ret['changes']['network_settings'] = '\n'.join(diff) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret except KeyError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Apply global network settings if apply_net_settings: try: __salt__['ip.apply_network_settings'](**kwargs) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret return ret
saltstack/salt
salt/states/network.py
system
python
def system(name, **kwargs):
    '''
    Ensure that global network settings are configured properly.

    name
        Custom name to represent this configuration change.

    kwargs
        The global parameters for the system.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Global network settings are up to date.'}
    needs_apply = False
    # Always propagate the master/minion test flag to the execution module.
    kwargs['test'] = __opts__['test']

    # Render the desired global settings and compare to what is on disk.
    try:
        current = __salt__['ip.get_network_settings']()
        desired = __salt__['ip.build_network_settings'](**kwargs)
        if __opts__['test']:
            # Dry run: report the pending change without applying it.
            if current == desired:
                return ret
            ret['result'] = None
            if not current and desired:
                ret['comment'] = 'Global network settings are set to be added.'
            else:
                changes = difflib.unified_diff(current, desired, lineterm='')
                ret['comment'] = ('Global network settings are set to be '
                                  'updated:\n{0}'.format('\n'.join(changes)))
            return ret
        if not current and desired:
            needs_apply = True
            ret['changes']['network_settings'] = 'Added global network settings.'
        elif current != desired:
            changes = difflib.unified_diff(current, desired, lineterm='')
            needs_apply = True
            ret['changes']['network_settings'] = '\n'.join(changes)
    except (AttributeError, KeyError) as error:
        # Both error types were handled identically; fold them together.
        ret['result'] = False
        ret['comment'] = six.text_type(error)
        return ret

    # Push the settings out via the ip execution module.
    if needs_apply:
        try:
            __salt__['ip.apply_network_settings'](**kwargs)
        except AttributeError as error:
            ret['result'] = False
            ret['comment'] = six.text_type(error)
            return ret

    return ret
Ensure that global network settings are configured properly. name Custom name to represent this configuration change. kwargs The global parameters for the system.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/network.py#L617-L678
null
# -*- coding: utf-8 -*- ''' Configuration of network interfaces =================================== The network module is used to create and manage network settings, interfaces can be set as either managed or ignored. By default all interfaces are ignored unless specified. .. note:: RedHat-based systems (RHEL, CentOS, Scientific, etc.) have been supported since version 2014.1.0. Debian-based systems (Debian, Ubuntu, etc.) have been supported since version 2017.7.0. The following options are not supported: ipaddr_start, and ipaddr_end. Other platforms are not yet supported. .. note:: On Debian-based systems, networking configuration can be specified in `/etc/network/interfaces` or via included files such as (by default) `/etc/network/interfaces.d/*`. This can be problematic for configuration management. It is recommended to use either `file.managed` *or* `network.managed`. If using `network.managed`, it can be useful to ensure `interfaces.d/` is empty. This can be done using: /etc/network/interfaces.d: file.directory: - clean: True .. 
code-block:: yaml system: network.system: - enabled: True - hostname: server1.example.com - gateway: 192.168.0.1 - gatewaydev: eth0 - nozeroconf: True - nisdomain: example.com - require_reboot: True eth0: network.managed: - enabled: True - type: eth - proto: static - ipaddr: 10.1.0.7 - netmask: 255.255.255.0 - gateway: 10.1.0.1 - enable_ipv6: true - ipv6proto: static - ipv6ipaddrs: - 2001:db8:dead:beef::3/64 - 2001:db8:dead:beef::7/64 - ipv6gateway: 2001:db8:dead:beef::1 - ipv6netmask: 64 - dns: - 8.8.8.8 - 8.8.4.4 eth0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - mtu: 9000 bond0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - mtu: 9000 eth1.0-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - vlan: True - mtu: 9000 bond0.1-range0: network.managed: - type: eth - ipaddr_start: 192.168.1.1 - ipaddr_end: 192.168.1.10 - clonenum_start: 10 - vlan: True - mtu: 9000 .. note:: add support of ranged interfaces (vlan, bond and eth) for redhat system, Important:type must be eth. 
routes: network.routes: - name: eth0 - routes: - name: secure_network ipaddr: 10.2.0.0 netmask: 255.255.255.0 gateway: 10.1.0.3 - name: HQ_network ipaddr: 10.100.0.0 netmask: 255.255.0.0 gateway: 10.1.0.10 eth2: network.managed: - enabled: True - type: slave - master: bond0 eth3: network.managed: - enabled: True - type: slave - master: bond0 eth4: network.managed: - enabled: True - type: eth - proto: dhcp - bridge: br0 eth5: network.managed: - enabled: True - type: eth - proto: dhcp - noifupdown: True # Do not restart the interface # you need to reboot/reconfigure manualy bond0: network.managed: - type: bond - ipaddr: 10.1.0.1 - netmask: 255.255.255.0 - mode: gre - proto: static - dns: - 8.8.8.8 - 8.8.4.4 - enabled: False - slaves: eth2 eth3 - require: - network: eth2 - network: eth3 - miimon: 100 - arp_interval: 250 - downdelay: 200 - lacp_rate: fast - max_bonds: 1 - updelay: 0 - use_carrier: on - hashing-algorithm: layer2 - mtu: 9000 - autoneg: on - speed: 1000 - duplex: full - rx: on - tx: off - sg: on - tso: off - ufo: off - gso: off - gro: off - lro: off bond0.2: network.managed: - type: vlan - ipaddr: 10.1.0.2 - use: - network: bond0 - require: - network: bond0 bond0.3: network.managed: - type: vlan - ipaddr: 10.1.0.3 - use: - network: bond0 - require: - network: bond0 bond0.10: network.managed: - type: vlan - ipaddr: 10.1.0.4 - use: - network: bond0 - require: - network: bond0 bond0.12: network.managed: - type: vlan - ipaddr: 10.1.0.5 - use: - network: bond0 - require: - network: bond0 br0: network.managed: - enabled: True - type: bridge - proto: dhcp - bridge: br0 - delay: 0 - ports: eth4 - bypassfirewall: True - use: - network: eth4 - require: - network: eth4 eth6: network.managed: - type: eth - noifupdown: True # IPv4 - proto: static - ipaddr: 192.168.4.9 - netmask: 255.255.255.0 - gateway: 192.168.4.1 - enable_ipv6: True # IPv6 - ipv6proto: static - ipv6ipaddr: 2001:db8:dead:c0::3 - ipv6netmask: 64 - ipv6gateway: 2001:db8:dead:c0::1 # override shared; 
makes those options v4-only - ipv6ttl: 15 # Shared - mtu: 1480 - ttl: 18 - dns: - 8.8.8.8 - 8.8.4.4 eth7: - type: eth - proto: static - ipaddr: 10.1.0.7 - netmask: 255.255.255.0 - gateway: 10.1.0.1 - enable_ipv6: True - ipv6proto: static - ipv6ipaddr: 2001:db8:dead:beef::3 - ipv6netmask: 64 - ipv6gateway: 2001:db8:dead:beef::1 - noifupdown: True eth8: network.managed: - enabled: True - type: eth - proto: static - enable_ipv6: true - ipv6proto: static - ipv6ipaddrs: - 2001:db8:dead:beef::3/64 - 2001:db8:dead:beef::7/64 - ipv6gateway: 2001:db8:dead:beef::1 - ipv6netmask: 64 - dns: - 8.8.8.8 - 8.8.4.4 system: network.system: - enabled: True - hostname: server1.example.com - gateway: 192.168.0.1 - gatewaydev: eth0 - nozeroconf: True - nisdomain: example.com - require_reboot: True - apply_hostname: True lo: network.managed: - name: lo - type: eth - proto: loopback - onboot: yes - userctl: no - ipv6_autoconf: no - enable_ipv6: true .. note:: Apply changes to hostname immediately. .. versionadded:: 2015.5.0 system: network.system: - hostname: server2.example.com - apply_hostname: True - retain_settings: True .. note:: Use `retain_settings` to retain current network settings that are not otherwise specified in the state. Particularly useful if only setting the hostname. Default behavior is to delete unspecified network settings. .. versionadded:: 2016.11.0 .. note:: When managing bridged interfaces on a Debian or Ubuntu based system, the ports argument is required. Red Hat systems will ignore the argument. ''' from __future__ import absolute_import, unicode_literals, print_function # Import Python libs import difflib # Import Salt libs import salt.utils.network import salt.utils.platform import salt.loader # Import 3rd party libs from salt.ext import six # Set up logging import logging log = logging.getLogger(__name__) def __virtual__(): ''' Confine this module to non-Windows systems with the required execution module available. 
''' if not salt.utils.platform.is_windows() and 'ip.get_interface' in __salt__: return True return False def managed(name, type, enabled=True, **kwargs): ''' Ensure that the named interface is configured properly. name The name of the interface to manage type Type of interface and configuration. enabled Designates the state of this interface. kwargs The IP parameters for this interface. ''' # For this function we are purposefully overwriting a bif # to enhance the user experience. This does not look like # it will cause a problem. Just giving a heads up in case # it does create a problem. ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Interface {0} is up to date.'.format(name), } if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # set ranged status apply_ranged_setting = False # Build interface try: old = __salt__['ip.get_interface'](name) new = __salt__['ip.build_interface'](name, type, enabled, **kwargs) if kwargs['test']: if old == new: pass if not old and new: ret['result'] = None ret['comment'] = 'Interface {0} is set to be ' \ 'added.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Interface {0} is set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) else: if not old and new: ret['comment'] = 'Interface {0} ' \ 'added.'.format(name) ret['changes']['interface'] = 'Added network interface.' 
apply_ranged_setting = True elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['comment'] = 'Interface {0} ' \ 'updated.'.format(name) ret['changes']['interface'] = '\n'.join(diff) apply_ranged_setting = True except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Debian based system can have a type of source # in the interfaces file, we don't ifup or ifdown it if type == 'source': return ret # Setup up bond modprobe script if required if type == 'bond': try: old = __salt__['ip.get_bond'](name) new = __salt__['ip.build_bond'](name, **kwargs) if kwargs['test']: if not old and new: ret['result'] = None ret['comment'] = 'Bond interface {0} is set to be ' \ 'added.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Bond interface {0} is set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) else: if not old and new: ret['comment'] = 'Bond interface {0} ' \ 'added.'.format(name) ret['changes']['bond'] = 'Added bond {0}.'.format(name) apply_ranged_setting = True elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['comment'] = 'Bond interface {0} ' \ 'updated.'.format(name) ret['changes']['bond'] = '\n'.join(diff) apply_ranged_setting = True except AttributeError as error: #TODO Add a way of reversing the interface changes. 
ret['result'] = False ret['comment'] = six.text_type(error) return ret if kwargs['test']: return ret # For Redhat/Centos ranged network if "range" in name: if apply_ranged_setting: try: ret['result'] = __salt__['service.restart']('network') ret['comment'] = "network restarted for change of ranged interfaces" return ret except Exception as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret ret['result'] = True ret['comment'] = "no change, passing it" return ret # Bring up/shutdown interface try: # Get Interface current status interfaces = salt.utils.network.interfaces() interface_status = False if name in interfaces: interface_status = interfaces[name].get('up') else: for iface in interfaces: if 'secondary' in interfaces[iface]: for second in interfaces[iface]['secondary']: if second.get('label', '') == name: interface_status = True if enabled: if 'noifupdown' not in kwargs: if interface_status: if ret['changes']: # Interface should restart to validate if it's up __salt__['ip.down'](name, type) __salt__['ip.up'](name, type) ret['changes']['status'] = 'Interface {0} restart to validate'.format(name) else: __salt__['ip.up'](name, type) ret['changes']['status'] = 'Interface {0} is up'.format(name) else: if 'noifupdown' not in kwargs: if interface_status: __salt__['ip.down'](name, type) ret['changes']['status'] = 'Interface {0} down'.format(name) except Exception as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Try to enslave bonding interfaces after master was created if type == 'bond' and 'noifupdown' not in kwargs: if 'slaves' in kwargs and kwargs['slaves']: # Check that there are new slaves for this master present_slaves = __salt__['cmd.run']( ['cat', '/sys/class/net/{0}/bonding/slaves'.format(name)]).split() desired_slaves = kwargs['slaves'].split() missing_slaves = set(desired_slaves) - set(present_slaves) # Enslave only slaves missing in master if missing_slaves: ifenslave_path = 
__salt__['cmd.run'](['which', 'ifenslave']).strip() if ifenslave_path: log.info("Adding slaves '%s' to the master %s", ' '.join(missing_slaves), name) cmd = [ifenslave_path, name] + list(missing_slaves) __salt__['cmd.run'](cmd, python_shell=False) else: log.error("Command 'ifenslave' not found") ret['changes']['enslave'] = ( "Added slaves '{0}' to master '{1}'" .format(' '.join(missing_slaves), name)) else: log.info("All slaves '%s' are already added to the master %s" ", no actions required", ' '.join(missing_slaves), name) if enabled and interface_status: # Interface was restarted, return return ret # TODO: create saltutil.refresh_grains that fires events to the minion daemon grains_info = salt.loader.grains(__opts__, True) __grains__.update(grains_info) __salt__['saltutil.refresh_modules']() return ret def routes(name, **kwargs): ''' Manage network interface static routes. name Interface name to apply the route to. kwargs Named routes ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Interface {0} routes are up to date.'.format(name), } apply_routes = False if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # Build interface routes try: old = __salt__['ip.get_routes'](name) new = __salt__['ip.build_routes'](name, **kwargs) if kwargs['test']: if old == new: return ret if not old and new: ret['result'] = None ret['comment'] = 'Interface {0} routes are set to be added.'.format(name) return ret elif old != new: diff = difflib.unified_diff(old, new, lineterm='') ret['result'] = None ret['comment'] = 'Interface {0} routes are set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) return ret if not old and new: apply_routes = True ret['comment'] = 'Interface {0} routes added.'.format(name) ret['changes']['network_routes'] = 'Added interface {0} routes.'.format(name) elif old != new: diff = difflib.unified_diff(old, new, lineterm='') apply_routes = True ret['comment'] = 'Interface {0} routes updated.'.format(name) 
ret['changes']['network_routes'] = '\n'.join(diff) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Apply interface routes if apply_routes: try: __salt__['ip.apply_network_settings'](**kwargs) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret return ret
saltstack/salt
salt/states/zabbix_mediatype.py
present
python
def present(name, mediatype, **kwargs): ''' Creates new mediatype. NOTE: This function accepts all standard mediatype properties: keyword argument names differ depending on your zabbix version, see: https://www.zabbix.com/documentation/3.0/manual/api/reference/host/object#host_inventory :param name: name of the mediatype :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml make_new_mediatype: zabbix_mediatype.present: - name: 'Email' - mediatype: 0 - smtp_server: smtp.example.com - smtp_hello: zabbix.example.com - smtp_email: zabbix@example.com ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_mediatype_created = 'Mediatype {0} created.'.format(name) comment_mediatype_updated = 'Mediatype {0} updated.'.format(name) comment_mediatype_notcreated = 'Unable to create mediatype: {0}. 
'.format(name) comment_mediatype_exists = 'Mediatype {0} already exists.'.format(name) changes_mediatype_created = {name: {'old': 'Mediatype {0} does not exist.'.format(name), 'new': 'Mediatype {0} created.'.format(name), } } # Zabbix API expects script parameters as a string of arguments seperated by newline characters if 'exec_params' in kwargs: if isinstance(kwargs['exec_params'], list): kwargs['exec_params'] = '\n'.join(kwargs['exec_params'])+'\n' else: kwargs['exec_params'] = six.text_type(kwargs['exec_params'])+'\n' mediatype_exists = __salt__['zabbix.mediatype_get'](name, **connection_args) if mediatype_exists: mediatypeobj = mediatype_exists[0] mediatypeid = int(mediatypeobj['mediatypeid']) update_email = False update_email_port = False update_email_security = False update_email_verify_peer = False update_email_verify_host = False update_email_auth = False update_script = False update_script_params = False update_sms = False update_jabber = False update_eztext = False update_status = False if int(mediatype) == 0 and 'smtp_server' in kwargs and 'smtp_helo' in kwargs and 'smtp_email' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['smtp_server'] != mediatypeobj['smtp_server'] or kwargs['smtp_email'] != mediatypeobj['smtp_email'] or kwargs['smtp_helo'] != mediatypeobj['smtp_helo']): update_email = True if int(mediatype) == 0 and 'smtp_port' in kwargs: if int(kwargs['smtp_port']) != int(mediatypeobj['smtp_port']): update_email_port = True if int(mediatype) == 0 and 'smtp_security' in kwargs: if int(kwargs['smtp_security']) != int(mediatypeobj['smtp_security']): update_email_security = True if int(mediatype) == 0 and 'smtp_verify_peer' in kwargs: if int(kwargs['smtp_verify_peer']) != int(mediatypeobj['smtp_verify_peer']): update_email_verify_peer = True if int(mediatype) == 0 and 'smtp_verify_host' in kwargs: if int(kwargs['smtp_verify_host']) != int(mediatypeobj['smtp_verify_host']): update_email_verify_host = True if int(mediatype) == 0 
and 'smtp_authentication' in kwargs and 'username' in kwargs and 'passwd' in kwargs: if (int(kwargs['smtp_authentication']) != int(mediatypeobj['smtp_authentication']) or kwargs['username'] != mediatypeobj['username'] or kwargs['passwd'] != mediatypeobj['passwd']): update_email_auth = True if int(mediatype) == 1 and 'exec_path' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['exec_path'] != mediatypeobj['exec_path']): update_script = True if int(mediatype) == 1 and 'exec_params' in kwargs: if kwargs['exec_params'] != mediatypeobj['exec_params']: update_script_params = True if int(mediatype) == 2 and 'gsm_modem' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['gsm_modem'] != mediatypeobj['gsm_modem']): update_sms = True if int(mediatype) == 3 and 'username' in kwargs and 'passwd' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['username'] != mediatypeobj['username'] or kwargs['passwd'] != mediatypeobj['passwd']): update_jabber = True if int(mediatype) == 100 and 'username' in kwargs and 'passwd' in kwargs and 'exec_path' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['username'] != mediatypeobj['username'] or kwargs['passwd'] != mediatypeobj['passwd'] or kwargs['exec_path'] != mediatypeobj['exec_path']): update_eztext = True if 'status' in kwargs: if int(kwargs['status']) != int(mediatypeobj['status']): update_status = True # Dry run, test=true mode if __opts__['test']: if mediatype_exists: if update_status: ret['result'] = None ret['comment'] = comment_mediatype_updated else: ret['result'] = True ret['comment'] = comment_mediatype_exists else: ret['result'] = None ret['comment'] = comment_mediatype_created return ret error = [] if mediatype_exists: if (update_email or update_email_port or update_email_security or update_email_verify_peer or update_email_verify_host or update_email_auth or update_script or update_script_params or update_sms or update_jabber or update_eztext or 
update_status): ret['result'] = True ret['comment'] = comment_mediatype_updated if update_email: updated_email = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, smtp_server=kwargs['smtp_server'], smtp_helo=kwargs['smtp_helo'], smtp_email=kwargs['smtp_email'], **connection_args) if 'error' in updated_email: error.append(updated_email['error']) else: ret['changes']['smtp_server'] = kwargs['smtp_server'] ret['changes']['smtp_helo'] = kwargs['smtp_helo'] ret['changes']['smtp_email'] = kwargs['smtp_email'] if update_email_port: updated_email_port = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_port=kwargs['smtp_port'], **connection_args) if 'error' in updated_email_port: error.append(updated_email_port['error']) else: ret['changes']['smtp_port'] = kwargs['smtp_port'] if update_email_security: updated_email_security = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_security=kwargs['smtp_security'], **connection_args) if 'error' in updated_email_security: error.append(updated_email_security['error']) else: ret['changes']['smtp_security'] = kwargs['smtp_security'] if update_email_verify_peer: updated_email_verify_peer = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_verify_peer=kwargs['smtp_verify_peer'], **connection_args) if 'error' in updated_email_verify_peer: error.append(updated_email_verify_peer['error']) else: ret['changes']['smtp_verify_peer'] = kwargs['smtp_verify_peer'] if update_email_verify_host: updated_email_verify_host = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_verify_host=kwargs['smtp_verify_host'], **connection_args) if 'error' in updated_email_verify_host: error.append(updated_email_verify_host['error']) else: ret['changes']['smtp_verify_host'] = kwargs['smtp_verify_host'] if update_email_auth: updated_email_auth = __salt__['zabbix.mediatype_update'](mediatypeid, username=kwargs['username'], passwd=kwargs['passwd'], smtp_authentication=kwargs['smtp_authentication'], **connection_args) if 'error' in 
updated_email_auth: error.append(updated_email_auth['error']) else: ret['changes']['smtp_authentication'] = kwargs['smtp_authentication'] ret['changes']['username'] = kwargs['username'] if update_script: updated_script = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, exec_path=kwargs['exec_path'], **connection_args) if 'error' in updated_script: error.append(updated_script['error']) else: ret['changes']['exec_path'] = kwargs['exec_path'] if update_script_params: updated_script_params = __salt__['zabbix.mediatype_update'](mediatypeid, exec_params=kwargs['exec_params'], **connection_args) if 'error' in updated_script_params: error.append(updated_script['error']) else: ret['changes']['exec_params'] = kwargs['exec_params'] if update_sms: updated_sms = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, gsm_modem=kwargs['gsm_modem'], **connection_args) if 'error' in updated_sms: error.append(updated_sms['error']) else: ret['changes']['gsm_modem'] = kwargs['gsm_modem'] if update_jabber: updated_jabber = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, username=kwargs['username'], passwd=kwargs['passwd'], **connection_args) if 'error' in updated_jabber: error.append(updated_jabber['error']) else: ret['changes']['username'] = kwargs['username'] if update_eztext: updated_eztext = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, username=kwargs['username'], passwd=kwargs['passwd'], exec_path=kwargs['exec_path'], **connection_args) if 'error' in updated_eztext: error.append(updated_eztext['error']) else: ret['changes']['username'] = kwargs['username'] ret['changes']['exec_path'] = kwargs['exec_path'] if update_status: updated_status = __salt__['zabbix.mediatype_update'](mediatypeid, status=kwargs['status'], **connection_args) if 'error' in updated_status: error.append(updated_status['error']) else: ret['changes']['status'] = kwargs['status'] else: ret['result'] = True ret['comment'] = comment_mediatype_exists 
else: mediatype_create = __salt__['zabbix.mediatype_create'](name, mediatype, **kwargs) if 'error' not in mediatype_create: ret['result'] = True ret['comment'] = comment_mediatype_created ret['changes'] = changes_mediatype_created else: ret['result'] = False ret['comment'] = comment_mediatype_notcreated + six.text_type(mediatype_create['error']) # error detected if error: ret['changes'] = {} ret['result'] = False ret['comment'] = six.text_type(error) return ret
Creates new mediatype. NOTE: This function accepts all standard mediatype properties: keyword argument names differ depending on your zabbix version, see: https://www.zabbix.com/documentation/3.0/manual/api/reference/host/object#host_inventory :param name: name of the mediatype :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml make_new_mediatype: zabbix_mediatype.present: - name: 'Email' - mediatype: 0 - smtp_server: smtp.example.com - smtp_hello: zabbix.example.com - smtp_email: zabbix@example.com
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zabbix_mediatype.py#L23-L319
null
# -*- coding: utf-8 -*- ''' Management of Zabbix mediatypes. :codeauthor: Raymond Kuiper <qix@the-wired.net> ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six def __virtual__(): ''' Only make these states available if Zabbix module is available. ''' return 'zabbix.mediatype_create' in __salt__ def absent(name, **kwargs): ''' Ensures that the mediatype does not exist, eventually deletes the mediatype. :param name: name of the mediatype :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml delete_mediatype: zabbix_mediatype.absent: - name: 'Email' ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_mediatype_deleted = 'Mediatype {0} deleted.'.format(name) comment_mediatype_notdeleted = 'Unable to delete mediatype: {0}. 
'.format(name) comment_mediatype_notexists = 'Mediatype {0} does not exist.'.format(name) changes_mediatype_deleted = {name: {'old': 'Mediatype {0} exists.'.format(name), 'new': 'Mediatype {0} deleted.'.format(name), } } mediatype_exists = __salt__['zabbix.mediatype_get'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if not mediatype_exists: ret['result'] = True ret['comment'] = comment_mediatype_notexists else: ret['result'] = None ret['comment'] = comment_mediatype_deleted return ret if not mediatype_exists: ret['result'] = True ret['comment'] = comment_mediatype_notexists else: try: mediatypeid = mediatype_exists[0]['mediatypeid'] mediatype_delete = __salt__['zabbix.mediatype_delete'](mediatypeid, **connection_args) except KeyError: mediatype_delete = False if mediatype_delete and 'error' not in mediatype_delete: ret['result'] = True ret['comment'] = comment_mediatype_deleted ret['changes'] = changes_mediatype_deleted else: ret['result'] = False ret['comment'] = comment_mediatype_notdeleted + six.text_type(mediatype_delete['error']) return ret
saltstack/salt
salt/states/zabbix_mediatype.py
absent
python
def absent(name, **kwargs): ''' Ensures that the mediatype does not exist, eventually deletes the mediatype. :param name: name of the mediatype :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml delete_mediatype: zabbix_mediatype.absent: - name: 'Email' ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_mediatype_deleted = 'Mediatype {0} deleted.'.format(name) comment_mediatype_notdeleted = 'Unable to delete mediatype: {0}. 
'.format(name) comment_mediatype_notexists = 'Mediatype {0} does not exist.'.format(name) changes_mediatype_deleted = {name: {'old': 'Mediatype {0} exists.'.format(name), 'new': 'Mediatype {0} deleted.'.format(name), } } mediatype_exists = __salt__['zabbix.mediatype_get'](name, **connection_args) # Dry run, test=true mode if __opts__['test']: if not mediatype_exists: ret['result'] = True ret['comment'] = comment_mediatype_notexists else: ret['result'] = None ret['comment'] = comment_mediatype_deleted return ret if not mediatype_exists: ret['result'] = True ret['comment'] = comment_mediatype_notexists else: try: mediatypeid = mediatype_exists[0]['mediatypeid'] mediatype_delete = __salt__['zabbix.mediatype_delete'](mediatypeid, **connection_args) except KeyError: mediatype_delete = False if mediatype_delete and 'error' not in mediatype_delete: ret['result'] = True ret['comment'] = comment_mediatype_deleted ret['changes'] = changes_mediatype_deleted else: ret['result'] = False ret['comment'] = comment_mediatype_notdeleted + six.text_type(mediatype_delete['error']) return ret
Ensures that the mediatype does not exist, eventually deletes the mediatype. :param name: name of the mediatype :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml delete_mediatype: zabbix_mediatype.absent: - name: 'Email'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zabbix_mediatype.py#L322-L386
null
# -*- coding: utf-8 -*- ''' Management of Zabbix mediatypes. :codeauthor: Raymond Kuiper <qix@the-wired.net> ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six def __virtual__(): ''' Only make these states available if Zabbix module is available. ''' return 'zabbix.mediatype_create' in __salt__ def present(name, mediatype, **kwargs): ''' Creates new mediatype. NOTE: This function accepts all standard mediatype properties: keyword argument names differ depending on your zabbix version, see: https://www.zabbix.com/documentation/3.0/manual/api/reference/host/object#host_inventory :param name: name of the mediatype :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) .. code-block:: yaml make_new_mediatype: zabbix_mediatype.present: - name: 'Email' - mediatype: 0 - smtp_server: smtp.example.com - smtp_hello: zabbix.example.com - smtp_email: zabbix@example.com ''' connection_args = {} if '_connection_user' in kwargs: connection_args['_connection_user'] = kwargs['_connection_user'] if '_connection_password' in kwargs: connection_args['_connection_password'] = kwargs['_connection_password'] if '_connection_url' in kwargs: connection_args['_connection_url'] = kwargs['_connection_url'] ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Comment and change messages comment_mediatype_created = 'Mediatype {0} created.'.format(name) comment_mediatype_updated = 'Mediatype {0} updated.'.format(name) comment_mediatype_notcreated = 'Unable to create mediatype: {0}. 
'.format(name) comment_mediatype_exists = 'Mediatype {0} already exists.'.format(name) changes_mediatype_created = {name: {'old': 'Mediatype {0} does not exist.'.format(name), 'new': 'Mediatype {0} created.'.format(name), } } # Zabbix API expects script parameters as a string of arguments seperated by newline characters if 'exec_params' in kwargs: if isinstance(kwargs['exec_params'], list): kwargs['exec_params'] = '\n'.join(kwargs['exec_params'])+'\n' else: kwargs['exec_params'] = six.text_type(kwargs['exec_params'])+'\n' mediatype_exists = __salt__['zabbix.mediatype_get'](name, **connection_args) if mediatype_exists: mediatypeobj = mediatype_exists[0] mediatypeid = int(mediatypeobj['mediatypeid']) update_email = False update_email_port = False update_email_security = False update_email_verify_peer = False update_email_verify_host = False update_email_auth = False update_script = False update_script_params = False update_sms = False update_jabber = False update_eztext = False update_status = False if int(mediatype) == 0 and 'smtp_server' in kwargs and 'smtp_helo' in kwargs and 'smtp_email' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['smtp_server'] != mediatypeobj['smtp_server'] or kwargs['smtp_email'] != mediatypeobj['smtp_email'] or kwargs['smtp_helo'] != mediatypeobj['smtp_helo']): update_email = True if int(mediatype) == 0 and 'smtp_port' in kwargs: if int(kwargs['smtp_port']) != int(mediatypeobj['smtp_port']): update_email_port = True if int(mediatype) == 0 and 'smtp_security' in kwargs: if int(kwargs['smtp_security']) != int(mediatypeobj['smtp_security']): update_email_security = True if int(mediatype) == 0 and 'smtp_verify_peer' in kwargs: if int(kwargs['smtp_verify_peer']) != int(mediatypeobj['smtp_verify_peer']): update_email_verify_peer = True if int(mediatype) == 0 and 'smtp_verify_host' in kwargs: if int(kwargs['smtp_verify_host']) != int(mediatypeobj['smtp_verify_host']): update_email_verify_host = True if int(mediatype) == 0 
and 'smtp_authentication' in kwargs and 'username' in kwargs and 'passwd' in kwargs: if (int(kwargs['smtp_authentication']) != int(mediatypeobj['smtp_authentication']) or kwargs['username'] != mediatypeobj['username'] or kwargs['passwd'] != mediatypeobj['passwd']): update_email_auth = True if int(mediatype) == 1 and 'exec_path' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['exec_path'] != mediatypeobj['exec_path']): update_script = True if int(mediatype) == 1 and 'exec_params' in kwargs: if kwargs['exec_params'] != mediatypeobj['exec_params']: update_script_params = True if int(mediatype) == 2 and 'gsm_modem' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['gsm_modem'] != mediatypeobj['gsm_modem']): update_sms = True if int(mediatype) == 3 and 'username' in kwargs and 'passwd' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['username'] != mediatypeobj['username'] or kwargs['passwd'] != mediatypeobj['passwd']): update_jabber = True if int(mediatype) == 100 and 'username' in kwargs and 'passwd' in kwargs and 'exec_path' in kwargs: if (int(mediatype) != int(mediatypeobj['type']) or kwargs['username'] != mediatypeobj['username'] or kwargs['passwd'] != mediatypeobj['passwd'] or kwargs['exec_path'] != mediatypeobj['exec_path']): update_eztext = True if 'status' in kwargs: if int(kwargs['status']) != int(mediatypeobj['status']): update_status = True # Dry run, test=true mode if __opts__['test']: if mediatype_exists: if update_status: ret['result'] = None ret['comment'] = comment_mediatype_updated else: ret['result'] = True ret['comment'] = comment_mediatype_exists else: ret['result'] = None ret['comment'] = comment_mediatype_created return ret error = [] if mediatype_exists: if (update_email or update_email_port or update_email_security or update_email_verify_peer or update_email_verify_host or update_email_auth or update_script or update_script_params or update_sms or update_jabber or update_eztext or 
update_status): ret['result'] = True ret['comment'] = comment_mediatype_updated if update_email: updated_email = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, smtp_server=kwargs['smtp_server'], smtp_helo=kwargs['smtp_helo'], smtp_email=kwargs['smtp_email'], **connection_args) if 'error' in updated_email: error.append(updated_email['error']) else: ret['changes']['smtp_server'] = kwargs['smtp_server'] ret['changes']['smtp_helo'] = kwargs['smtp_helo'] ret['changes']['smtp_email'] = kwargs['smtp_email'] if update_email_port: updated_email_port = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_port=kwargs['smtp_port'], **connection_args) if 'error' in updated_email_port: error.append(updated_email_port['error']) else: ret['changes']['smtp_port'] = kwargs['smtp_port'] if update_email_security: updated_email_security = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_security=kwargs['smtp_security'], **connection_args) if 'error' in updated_email_security: error.append(updated_email_security['error']) else: ret['changes']['smtp_security'] = kwargs['smtp_security'] if update_email_verify_peer: updated_email_verify_peer = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_verify_peer=kwargs['smtp_verify_peer'], **connection_args) if 'error' in updated_email_verify_peer: error.append(updated_email_verify_peer['error']) else: ret['changes']['smtp_verify_peer'] = kwargs['smtp_verify_peer'] if update_email_verify_host: updated_email_verify_host = __salt__['zabbix.mediatype_update'](mediatypeid, smtp_verify_host=kwargs['smtp_verify_host'], **connection_args) if 'error' in updated_email_verify_host: error.append(updated_email_verify_host['error']) else: ret['changes']['smtp_verify_host'] = kwargs['smtp_verify_host'] if update_email_auth: updated_email_auth = __salt__['zabbix.mediatype_update'](mediatypeid, username=kwargs['username'], passwd=kwargs['passwd'], smtp_authentication=kwargs['smtp_authentication'], **connection_args) if 'error' in 
updated_email_auth: error.append(updated_email_auth['error']) else: ret['changes']['smtp_authentication'] = kwargs['smtp_authentication'] ret['changes']['username'] = kwargs['username'] if update_script: updated_script = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, exec_path=kwargs['exec_path'], **connection_args) if 'error' in updated_script: error.append(updated_script['error']) else: ret['changes']['exec_path'] = kwargs['exec_path'] if update_script_params: updated_script_params = __salt__['zabbix.mediatype_update'](mediatypeid, exec_params=kwargs['exec_params'], **connection_args) if 'error' in updated_script_params: error.append(updated_script['error']) else: ret['changes']['exec_params'] = kwargs['exec_params'] if update_sms: updated_sms = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, gsm_modem=kwargs['gsm_modem'], **connection_args) if 'error' in updated_sms: error.append(updated_sms['error']) else: ret['changes']['gsm_modem'] = kwargs['gsm_modem'] if update_jabber: updated_jabber = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, username=kwargs['username'], passwd=kwargs['passwd'], **connection_args) if 'error' in updated_jabber: error.append(updated_jabber['error']) else: ret['changes']['username'] = kwargs['username'] if update_eztext: updated_eztext = __salt__['zabbix.mediatype_update'](mediatypeid, type=mediatype, username=kwargs['username'], passwd=kwargs['passwd'], exec_path=kwargs['exec_path'], **connection_args) if 'error' in updated_eztext: error.append(updated_eztext['error']) else: ret['changes']['username'] = kwargs['username'] ret['changes']['exec_path'] = kwargs['exec_path'] if update_status: updated_status = __salt__['zabbix.mediatype_update'](mediatypeid, status=kwargs['status'], **connection_args) if 'error' in updated_status: error.append(updated_status['error']) else: ret['changes']['status'] = kwargs['status'] else: ret['result'] = True ret['comment'] = comment_mediatype_exists 
else: mediatype_create = __salt__['zabbix.mediatype_create'](name, mediatype, **kwargs) if 'error' not in mediatype_create: ret['result'] = True ret['comment'] = comment_mediatype_created ret['changes'] = changes_mediatype_created else: ret['result'] = False ret['comment'] = comment_mediatype_notcreated + six.text_type(mediatype_create['error']) # error detected if error: ret['changes'] = {} ret['result'] = False ret['comment'] = six.text_type(error) return ret
saltstack/salt
salt/utils/network.py
sanitize_host
python
def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters])
Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L58-L64
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
def _generate_minion_id():
    '''
    Get list of possible host names and convention names.

    Collects candidates from the resolver (FQDN), :func:`platform.node`,
    ``getaddrinfo`` canonical names, host files and local IP addresses,
    filtering out localhost-style names and duplicates.

    :return: DistinctList of candidate minion ids (first entry is preferred).
    '''
    # There are three types of hostnames:
    # 1. Network names. How host is accessed from the network.
    # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts)
    # 3. Convention names, an internal nodename.

    class DistinctList(list):
        '''
        List, which allows one to append only distinct objects.
        Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version.
        Override 'filter()' for custom filtering.
        '''
        # Regexes matching names/addresses that only mean "this host locally"
        # and are therefore useless as a network-wide minion id.
        localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0',
                              r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa']

        def append(self, p_object):
            if p_object and p_object not in self and not self.filter(p_object):
                super(DistinctList, self).append(p_object)
            return self

        def extend(self, iterable):
            for obj in iterable:
                self.append(obj)
            return self

        def filter(self, element):
            'Returns True if element needs to be filtered'
            for rgx in self.localhost_matchers:
                if re.match(rgx, element):
                    return True

        def first(self):
            return self and self[0] or None

    hostname = socket.gethostname()

    hosts = DistinctList().append(
        salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname)))
    ).append(platform.node()).append(hostname)
    if not hosts:
        try:
            for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET,
                                            socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME):
                if len(a_nfo) > 3:
                    hosts.append(a_nfo[3])
        except socket.gaierror as err:
            # Log the actual resolver error; previously this logged the
            # socket.gaierror *class* instead of the caught exception.
            log.warning('Cannot resolve address %s info via socket: %s',
                        hosts.first() or 'localhost (N/A)', err)
    # Universal method for everywhere (Linux, Slowlaris, Windows etc)
    for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts',
                   r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))):
        try:
            with salt.utils.files.fopen(f_name) as f_hdl:
                for line in f_hdl:
                    line = salt.utils.stringutils.to_unicode(line)
                    hst = line.strip().split('#')[0].strip().split()
                    if hst:
                        if hst[0][:4] in ('127.', '::1') or len(hst) == 1:
                            hosts.extend(hst)
        except IOError:
            pass

    # include public and private ipaddresses
    return hosts.extend([addr for addr in ip_addrs()
                         if not ipaddress.ip_address(addr).is_loopback])
def is_ipv4(ip):
    '''
    Returns a bool telling if the value passed to it was a valid IPv4 address
    '''
    try:
        parsed = ipaddress.ip_address(ip)
    except ValueError:
        # Not parseable as any IP address at all
        return False
    return parsed.version == 4
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
def cidr_to_ipv4_netmask(cidr_bits):
    '''
    Returns an IPv4 netmask

    Accepts an int or numeric string prefix length (1-32); anything else,
    or an out-of-range value, yields an empty string.
    '''
    try:
        remaining = int(cidr_bits)
    except ValueError:
        return ''
    if not 1 <= remaining <= 32:
        return ''
    octets = []
    for _ in range(4):
        # Consume up to 8 mask bits per octet; 8 bits -> 255, 0 bits -> 0.
        take = min(remaining, 8)
        octets.append('{0:d}'.format(256 - (2 ** (8 - take))))
        remaining -= take
    return '.'.join(octets)
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
def get_net_size(mask):
    '''
    Turns an IPv4 netmask into it's corresponding prefix length
    (255.255.255.0 -> 24 as in 192.168.1.10/24).
    '''
    # Concatenate the octets as one 32-char bit string, then count bits up
    # to the last set one.
    bits = ''.join(bin(int(octet))[2:].zfill(8) for octet in mask.split('.'))
    return len(bits.rstrip('0'))
def hw_addr(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface

    :param iface: interface name, e.g. ``eth0``
    :return: MAC address string, '' if the interface has none, or an error
        message string if the interface does not exist.

    .. versionchanged:: 2016.11.4
        Added support for AIX
    '''
    if salt.utils.platform.is_aix():
        # Bug fix: previously returned the function object itself instead
        # of calling it, so AIX callers got a function, not a MAC address.
        return _hw_addr_aix(iface)

    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('hwaddr', '')
    else:
        return error
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is returned.
    If 'invert=True' assume that ip from /proc/net/<proto>
    '''
    if len(hex_ip) == 32:
        # IPv6: 32 hex chars as four 8-char words; with invert=True each
        # word's bytes are stored little-endian (/proc/net layout).
        groups = []
        for off in range(0, 32, 8):
            pairs = [hex_ip[off + x:off + x + 2] for x in range(0, 8, 2)]
            if invert:
                pairs = pairs[::-1]
            groups.append('{0}{1}:{2}{3}'.format(*pairs))
        try:
            address = ipaddress.IPv6Address(':'.join(groups))
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip
        # Render IPv4-mapped addresses in dotted-quad form
        return str(address.ipv4_mapped) if address.ipv4_mapped else address.compressed

    try:
        hip = int(hex_ip, 16)
    except ValueError:
        return hex_ip
    octets = (hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255)
    if invert:
        octets = octets[::-1]
    return '{0}.{1}.{2}.{3}'.format(*octets)
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
isportopen
python
def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out
Return status of a port
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L67-L78
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
host_to_ips
python
def host_to_ips(host):
    '''
    Returns a list of IP addresses of a given hostname or None if not found.

    Resolution is attempted for both address families (IPv4 and IPv6) over
    stream sockets; on any resolution failure — or when no address comes
    back — ``None`` is returned instead of raising.
    '''
    try:
        # AF_UNSPEC lets getaddrinfo return both v4 and v6 records.
        records = socket.getaddrinfo(host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
        # sockaddr[0] is the textual IP for both AF_INET (ip, port) and
        # AF_INET6 (ip, port, flow_info, scope_id) tuples.
        resolved = [
            sockaddr[0]
            for family, _socktype, _proto, _canonname, sockaddr in records
            if family in (socket.AF_INET, socket.AF_INET6)
        ]
        return resolved or None
    except Exception:
        # Deliberately broad: any failure to resolve maps to None,
        # matching the documented "not found" contract.
        return None
Returns a list of IP addresses of a given hostname or None if not found.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L81-L98
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. 
# 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return 
hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
_generate_minion_id
python
def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = 
line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback])
Get list of possible host names and convention names. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L101-L170
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n", "def ip_addrs(interface=None, include_loopback=False, interface_data=None):\n '''\n Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is\n ignored, unless 'include_loopback=True' is indicated. 
If 'interface' is\n provided, then only IP addresses from that interface will be returned.\n '''\n return _ip_addrs(interface, include_loopback, interface_data, 'inet')\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). 
:param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
generate_minion_id
python
def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost'
Return only first element of the hostname from all possible list. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L173-L183
[ "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n", "def _generate_minion_id():\n '''\n Get list of possible host names and convention names.\n\n :return:\n '''\n # There are three types of hostnames:\n # 1. Network names. How host is accessed from the network.\n # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts)\n # 3. 
Convention names, an internal nodename.\n\n class DistinctList(list):\n '''\n List, which allows one to append only distinct objects.\n Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version.\n Override 'filter()' for custom filtering.\n '''\n localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\\d', r'0\\.0\\.0\\.0',\n r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa']\n\n def append(self, p_object):\n if p_object and p_object not in self and not self.filter(p_object):\n super(DistinctList, self).append(p_object)\n return self\n\n def extend(self, iterable):\n for obj in iterable:\n self.append(obj)\n return self\n\n def filter(self, element):\n 'Returns True if element needs to be filtered'\n for rgx in self.localhost_matchers:\n if re.match(rgx, element):\n return True\n\n def first(self):\n return self and self[0] or None\n\n hostname = socket.gethostname()\n\n hosts = DistinctList().append(\n salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname)))\n ).append(platform.node()).append(hostname)\n if not hosts:\n try:\n for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET,\n socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME):\n if len(a_nfo) > 3:\n hosts.append(a_nfo[3])\n except socket.gaierror:\n log.warning('Cannot resolve address %s info via socket: %s',\n hosts.first() or 'localhost (N/A)', socket.gaierror)\n # Universal method for everywhere (Linux, Slowlaris, Windows etc)\n for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts',\n r'{win}\\system32\\drivers\\etc\\hosts'.format(win=os.getenv('WINDIR'))):\n try:\n with salt.utils.files.fopen(f_name) as f_hdl:\n for line in f_hdl:\n line = salt.utils.stringutils.to_unicode(line)\n hst = line.strip().split('#')[0].strip().split()\n if hst:\n if hst[0][:4] in ('127.', '::1') or len(hst) == 1:\n hosts.extend(hst)\n except IOError:\n pass\n\n # include public and private 
ipaddresses\n return hosts.extend([addr for addr in ip_addrs()\n if not ipaddress.ip_address(addr).is_loopback])\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host 
(IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
@jinja_filter('is_ipv4')
def is_ipv4_filter(ip, options=None):
    '''
    Returns a bool telling if the value passed to it was a valid IPv4 address.

    ip
        The IP address.

    net: False
        Consider IP addresses followed by netmask.

    options
        CSV of options regarding the nature of the IP address. E.g.: loopback,
        multicast, private etc.
    '''
    return isinstance(_is_ipv(ip, 4, options=options), six.string_types)


@jinja_filter('is_ipv6')
def is_ipv6_filter(ip, options=None):
    '''
    Returns a bool telling if the value passed to it was a valid IPv6 address.

    ip
        The IP address.

    net: False
        Consider IP addresses followed by netmask.

    options
        CSV of options regarding the nature of the IP address. E.g.: loopback,
        multicast, private etc.
    '''
    return isinstance(_is_ipv(ip, 6, options=options), six.string_types)


def _ipv_filter(value, version, options=None):
    # Validate a single string, or filter a sequence down to the members
    # that are valid addresses of the requested IP version.
    if version not in (4, 6):
        return

    if isinstance(value, (six.string_types, six.text_type, six.binary_type)):
        # calls is_ipv4 or is_ipv6 for `value`
        return _is_ipv(value, version, options=options)

    if isinstance(value, (list, tuple, types.GeneratorType)):
        kept = []
        for addr in value:
            normalised = _is_ipv(addr, version, options=options)
            if normalised is not None:
                kept.append(normalised)
        return kept

    return None


@jinja_filter('ipv4')
def ipv4(value, options=None):
    '''
    Filters a list and returns IPv4 values only.
    '''
    return _ipv_filter(value, 4, options=options)


@jinja_filter('ipv6')
def ipv6(value, options=None):
    '''
    Filters a list and returns IPv6 values only.
    '''
    return _ipv_filter(value, 6, options=options)


@jinja_filter('ipaddr')
def ipaddr(value, options=None):
    '''
    Filters and returns only valid IP objects.
    '''
    v4_result = ipv4(value, options=options)
    v6_result = ipv6(value, options=options)
    if v4_result is None or v6_result is None:
        # a scalar address is either IPv4 or IPv6, so at least one of the
        # calls above returned None; hand back whichever one succeeded
        return v4_result or v6_result
    # both inputs were sequences: concatenate the two filtered lists
    return v4_result + v6_result
def _filter_ipaddr(value, options, version=None):
    # Normalise `value` through the ipv4/ipv6/ipaddr filters and always hand
    # back a list (or None when nothing matched).
    if not version:
        filtered = ipaddr(value, options)
    elif version == 4:
        filtered = ipv4(value, options)
    elif version == 6:
        filtered = ipv6(value, options)
    else:
        filtered = None

    if not filtered:
        return
    if isinstance(filtered, (list, tuple, types.GeneratorType)):
        return filtered
    return [filtered]


@jinja_filter('ip_host')
def ip_host(value, options=None, version=None):
    '''
    Returns the interfaces IP address, e.g.: 192.168.0.1/28.
    '''
    filtered = _filter_ipaddr(value, options=options, version=version)
    if not filtered:
        return
    if isinstance(value, (list, tuple, types.GeneratorType)):
        return [six.text_type(ipaddress.ip_interface(addr)) for addr in filtered]
    return six.text_type(ipaddress.ip_interface(filtered[0]))


def _network_hosts(ip_addr_entry):
    # Materialise every usable host inside the network as text.
    return [
        six.text_type(host)
        for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts()
    ]


@jinja_filter('network_hosts')
def network_hosts(value, options=None, version=None):
    '''
    Return the list of hosts within a network.

    .. note::

        When running this command with a large IPv6 network, the command will
        take a long time to gather all of the hosts.
    '''
    filtered = _filter_ipaddr(value, options=options, version=version)
    if not filtered:
        return
    if isinstance(value, (list, tuple, types.GeneratorType)):
        return [_network_hosts(addr) for addr in filtered]
    return _network_hosts(filtered[0])
def _network_size(ip_addr_entry):
    # Total number of addresses in the network (hosts plus network/broadcast).
    return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses


@jinja_filter('network_size')
def network_size(value, options=None, version=None):
    '''
    Get the size of a network.
    '''
    filtered = _filter_ipaddr(value, options=options, version=version)
    if not filtered:
        return
    if isinstance(value, (list, tuple, types.GeneratorType)):
        return [_network_size(addr) for addr in filtered]
    return _network_size(filtered[0])


def natural_ipv4_netmask(ip, fmt='prefixlen'):
    '''
    Returns the "natural" (classful) mask of an IPv4 address.
    '''
    bits = _ipv4_to_bits(ip)

    # Leading '11' -> class C (/24), leading '1' -> class B (/16),
    # everything else -> class A (/8).
    if bits.startswith('11'):
        mask = '24'
    elif bits.startswith('1'):
        mask = '16'
    else:
        mask = '8'

    return cidr_to_ipv4_netmask(mask) if fmt == 'netmask' else '/' + mask


def rpad_ipv4_network(ip):
    '''
    Returns an IP network address padded with zeros.

    Ex: '192.168.3' -> '192.168.3.0'
        '10.209' -> '10.209.0.0'
    '''
    # Iterating the string '0000' yields four single '0' fillers, so short
    # dotted quads are padded out to exactly four octets.
    return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4))


def cidr_to_ipv4_netmask(cidr_bits):
    '''
    Returns an IPv4 netmask for a prefix length (1-32); an empty string is
    returned for anything that does not parse as a valid prefix.
    '''
    try:
        remaining = int(cidr_bits)
    except ValueError:
        return ''
    if not 1 <= remaining <= 32:
        return ''

    octets = []
    for _ in range(4):
        if remaining >= 8:
            octets.append('255')
        else:
            octets.append('{0:d}'.format(256 - (2 ** (8 - remaining))))
        remaining = max(remaining - 8, 0)
    return '.'.join(octets)


def _number_of_set_bits_to_ipv4_netmask(set_bits):  # pylint: disable=C0103
    '''
    Returns an IPv4 netmask from the integer representation of that mask.

    Ex. 0xffffff00 -> '255.255.255.0'
    '''
    return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
# pylint: disable=C0103
def _number_of_set_bits(x):
    '''
    Returns the number of bits that are set in a 32bit int
    '''
    # Taken from http://stackoverflow.com/a/4912729. Many thanks!
    # Classic SWAR popcount: fold pair sums, nibble sums, then byte sums,
    # finally masking to 6 bits (maximum count is 32).
    x -= (x >> 1) & 0x55555555
    x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
    x = ((x >> 4) + x) & 0x0f0f0f0f
    x += x >> 8
    x += x >> 16
    return x & 0x0000003f
# pylint: enable=C0103


def _interfaces_ip(out):
    '''
    Uses ip to return a dictionary of interfaces with various information about
    each (up/down state, ip address, netmask, and hwaddr)
    '''
    ret = dict()

    def parse_network(value, cols):
        '''
        Return a tuple of ip, netmask, broadcast
        based on the current set of cols
        '''
        # NOTE: reads `type_` from the enclosing loop scope (the address
        # family of the line currently being parsed).
        brd = None
        scope = None
        if '/' in value:  # we have a CIDR in this address
            ip, cidr = value.split('/')  # pylint: disable=C0103
        else:
            ip = value  # pylint: disable=C0103
            cidr = 32

        if type_ == 'inet':
            mask = cidr_to_ipv4_netmask(int(cidr))
            if 'brd' in cols:
                brd = cols[cols.index('brd') + 1]
        elif type_ == 'inet6':
            mask = cidr
            if 'scope' in cols:
                scope = cols[cols.index('scope') + 1]
        return (ip, mask, brd, scope)

    # `ip addr show` prints one block per interface; blocks start with an
    # index digit at column 0, so split on newline-followed-by-digit.
    groups = re.compile('\r?\n\\d').split(out)
    for group in groups:
        iface = None
        data = dict()

        for line in group.splitlines():
            if ' ' not in line:
                continue
            # Interface header, e.g. '2: eth0@bond0: <BROADCAST,UP,...>'
            match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
            if match:
                iface, parent, attrs = match.groups()
                if 'UP' in attrs.split(','):
                    data['up'] = True
                else:
                    data['up'] = False
                if parent:
                    data['parent'] = parent
                continue

            cols = line.split()
            if len(cols) >= 2:
                type_, value = tuple(cols[0:2])
                iflabel = cols[-1:][0]
                if type_ in ('inet', 'inet6'):
                    if 'secondary' not in cols:
                        ipaddr, netmask, broadcast, scope = parse_network(value, cols)
                        if type_ == 'inet':
                            if 'inet' not in data:
                                data['inet'] = list()
                            addr_obj = dict()
                            addr_obj['address'] = ipaddr
                            addr_obj['netmask'] = netmask
                            addr_obj['broadcast'] = broadcast
                            addr_obj['label'] = iflabel
                            data['inet'].append(addr_obj)
                        elif type_ == 'inet6':
                            if 'inet6' not in data:
                                data['inet6'] = list()
                            addr_obj = dict()
                            addr_obj['address'] = ipaddr
                            addr_obj['prefixlen'] = netmask
                            addr_obj['scope'] = scope
                            data['inet6'].append(addr_obj)
                    else:
                        # Addresses flagged 'secondary' are collected apart
                        # from the primary inet/inet6 lists.
                        if 'secondary' not in data:
                            data['secondary'] = list()
                        ip_, mask, brd, scp = parse_network(value, cols)
                        data['secondary'].append({
                            'type': type_,
                            'address': ip_,
                            'netmask': mask,
                            'broadcast': brd,
                            'label': iflabel,
                        })
                        del ip_, mask, brd, scp
                elif type_.startswith('link'):
                    # 'link/ether aa:bb:...' line carries the MAC address.
                    data['hwaddr'] = value
        if iface:
            ret[iface] = data
            del iface, data
    return ret
def _interfaces_ifconfig(out):
    '''
    Uses ifconfig to return a dictionary of interfaces with various information
    about each (up/down state, ip address, netmask, and hwaddr)
    '''
    ret = dict()

    # Patterns differ between SunOS and the Linux/BSD ifconfig dialects.
    piface = re.compile(r'^([^\s:]+)')
    pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)')
    if salt.utils.platform.is_sunos():
        pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)')
        pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)')
        pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*')
    else:
        pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s')
        pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
        # NOTE(review): the scopeid alternative captures a single hex digit
        # ('0x' plus one char) — looks narrower than real scopeids; confirm.
        pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?')
    pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
    pupdown = re.compile('UP')
    pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')

    # One block per interface: split where a non-space char follows a newline.
    groups = re.compile('\r?\n(?=\\S)').split(out)
    for group in groups:
        data = dict()
        iface = ''
        updown = False
        for line in group.splitlines():
            miface = piface.match(line)
            mmac = pmac.match(line)
            mip = pip.match(line)
            mip6 = pip6.match(line)
            mupdown = pupdown.search(line)
            if miface:
                iface = miface.group(1)
            if mmac:
                data['hwaddr'] = mmac.group(1)

                if salt.utils.platform.is_sunos():
                    # SunOS prints MAC octets without leading zeros; pad them.
                    expand_mac = []
                    for chunk in data['hwaddr'].split(':'):
                        expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else '{0}'.format(chunk))
                    data['hwaddr'] = ':'.join(expand_mac)
            if mip:
                if 'inet' not in data:
                    data['inet'] = list()
                addr_obj = dict()
                addr_obj['address'] = mip.group(1)
                mmask = pmask.match(line)
                if mmask:
                    if mmask.group(1):
                        # hex mask (possibly 0x-prefixed) -> dotted quad
                        mmask = _number_of_set_bits_to_ipv4_netmask(
                            int(mmask.group(1), 16))
                    else:
                        mmask = mmask.group(2)
                    addr_obj['netmask'] = mmask
                mbcast = pbcast.match(line)
                if mbcast:
                    addr_obj['broadcast'] = mbcast.group(1)
                data['inet'].append(addr_obj)
            if mupdown:
                updown = True
            if mip6:
                if 'inet6' not in data:
                    data['inet6'] = list()
                addr_obj = dict()
                addr_obj['address'] = mip6.group(1) or mip6.group(2)
                mmask6 = pmask6.match(line)
                if mmask6:
                    addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
                    if not salt.utils.platform.is_sunos():
                        ipv6scope = mmask6.group(3) or mmask6.group(4)
                        addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope
                # SunOS sometimes has ::/0 as inet6 addr when using addrconf
                if not salt.utils.platform.is_sunos() \
                        or addr_obj['address'] != '::' \
                        and addr_obj['prefixlen'] != 0:
                    data['inet6'].append(addr_obj)
        data['up'] = updown
        if iface in ret:
            # SunOS optimization, where interfaces occur twice in 'ifconfig -a'
            # output with the same name: for ipv4 and then for ipv6 addr family.
            # Every instance has it's own 'UP' status and we assume that ipv4
            # status determines global interface status.
            #
            # merge items with higher priority for older values
            # after that merge the inet and inet6 sub items for both
            ret[iface] = dict(list(data.items()) + list(ret[iface].items()))
            if 'inet' in data:
                ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet'])
            if 'inet6' in data:
                ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6'])
        else:
            ret[iface] = data
            del data
    return ret
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
def win_interfaces():
    '''
    Obtain interface information for Windows systems
    '''
    # Query WMI inside a COM apartment; only adapters with IP enabled are
    # returned by the Win32_NetworkAdapterConfiguration filter.
    with salt.utils.winapi.Com():
        c = wmi.WMI()
        ifaces = {}
        for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
            ifaces[iface.Description] = dict()
            if iface.MACAddress:
                ifaces[iface.Description]['hwaddr'] = iface.MACAddress
            if iface.IPEnabled:
                ifaces[iface.Description]['up'] = True
                for ip in iface.IPAddress:
                    # Addresses containing '.' are treated as IPv4,
                    # ':' as IPv6.
                    if '.' in ip:
                        if 'inet' not in ifaces[iface.Description]:
                            ifaces[iface.Description]['inet'] = []
                        item = {'address': ip,
                                'label': iface.Description}
                        if iface.DefaultIPGateway:
                            # NOTE(review): this picks the default gateway as
                            # the 'broadcast' value — confirm intent.
                            broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '')
                            if broadcast:
                                item['broadcast'] = broadcast
                        if iface.IPSubnet:
                            netmask = next((i for i in iface.IPSubnet if '.' in i), '')
                            if netmask:
                                item['netmask'] = netmask
                        ifaces[iface.Description]['inet'].append(item)
                    if ':' in ip:
                        if 'inet6' not in ifaces[iface.Description]:
                            ifaces[iface.Description]['inet6'] = []
                        item = {'address': ip}
                        if iface.DefaultIPGateway:
                            broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '')
                            if broadcast:
                                item['broadcast'] = broadcast
                        if iface.IPSubnet:
                            netmask = next((i for i in iface.IPSubnet if ':' in i), '')
                            if netmask:
                                item['netmask'] = netmask
                        ifaces[iface.Description]['inet6'].append(item)
            else:
                ifaces[iface.Description]['up'] = False
    return ifaces
def interfaces():
    '''
    Return a dictionary of information about all the interfaces on the minion
    '''
    if salt.utils.platform.is_windows():
        return win_interfaces()
    if salt.utils.platform.is_netbsd():
        return netbsd_interfaces()
    return linux_interfaces()


def get_net_start(ipaddr, netmask):
    '''
    Return the address of the network
    '''
    network = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False)
    return six.text_type(network.network_address)


def get_net_size(mask):
    '''
    Turns an IPv4 netmask into it's corresponding prefix length
    (255.255.255.0 -> 24 as in 192.168.1.10/24).
    '''
    # Concatenate the 8-bit binary form of every octet, then count the
    # leading ones by stripping the trailing zeros.
    bits = ''.join(bin(int(octet))[2:].zfill(8) for octet in mask.split('.'))
    return len(bits.rstrip('0'))


def calc_net(ipaddr, netmask=None):
    '''
    Takes IP (CIDR notation supported) and optionally netmask
    and returns the network in CIDR-notation.
    (The IP can be any IP inside the subnet)
    '''
    cidr = ipaddr if netmask is None else '{0}/{1}'.format(ipaddr, netmask)
    return six.text_type(ipaddress.ip_network(cidr, strict=False))
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
def interface(iface):
    '''
    Return the details of `iface` or an error if it does not exist
    '''
    iface_info, error = _get_iface_info(iface)
    if error is not False:
        return error
    return iface_info.get(iface, {}).get('inet', '')


def interface_ip(iface):
    '''
    Return `iface` IPv4 addr or an error if `iface` does not exist
    '''
    iface_info, error = _get_iface_info(iface)
    if error is not False:
        return error
    inet = iface_info.get(iface, {}).get('inet', None)
    return inet[0].get('address', '') if inet else ''


def _subnets(proto='inet', interfaces_=None):
    '''
    Returns a list of subnets to which the host belongs
    '''
    # Normalise the interface selection into a {name: info} dict.
    if interfaces_ is None:
        ifaces = interfaces()
    elif isinstance(interfaces_, list):
        ifaces = dict((name, info)
                      for name, info in six.iteritems(interfaces())
                      if name in interfaces_)
    else:
        ifaces = {interfaces_: interfaces().get(interfaces_, {})}

    if proto == 'inet':
        subnet = 'netmask'
        dflt_cidr = 32
    elif proto == 'inet6':
        subnet = 'prefixlen'
        dflt_cidr = 128
    else:
        log.error('Invalid proto %s calling subnets()', proto)
        return

    ret = set()
    for ip_info in six.itervalues(ifaces):
        addrs = ip_info.get(proto, [])
        addrs.extend([addr for addr in ip_info.get('secondary', [])
                      if addr.get('type') == proto])

        for intf in addrs:
            if subnet in intf:
                intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet]))
            else:
                intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr))
            if not intf.is_loopback:
                ret.add(intf.network)
    return [six.text_type(net) for net in sorted(ret)]


def subnets(interfaces=None):
    '''
    Returns a list of IPv4 subnets to which the host belongs
    '''
    return _subnets('inet', interfaces_=interfaces)


def subnets6():
    '''
    Returns a list of IPv6 subnets to which the host belongs
    '''
    return _subnets('inet6')


def in_subnet(cidr, addr=None):
    '''
    Returns True if host or (any of) addrs is within specified subnet, otherwise False
    '''
    try:
        network = ipaddress.ip_network(cidr)
    except ValueError:
        log.error('Invalid CIDR \'%s\'', cidr)
        return False

    if addr is None:
        # No address supplied: test every address configured on this host.
        addr = ip_addrs()
        addr.extend(ip_addrs6())
    elif not isinstance(addr, (list, tuple)):
        addr = (addr,)

    return any(ipaddress.ip_address(item) in network for item in addr)
def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'):
    '''
    Return the full list of IP adresses matching the criteria

    proto = inet|inet6
    '''
    ifaces = interface_data if isinstance(interface_data, dict) else interfaces()
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
                              if k == interface])
        if not target_ifaces:
            log.error('Interface %s not found.', interface)

    ret = set()
    for ip_info in six.itervalues(target_ifaces):
        addrs = ip_info.get(proto, [])
        addrs.extend([addr for addr in ip_info.get('secondary', [])
                      if addr.get('type') == proto])

        for addr in addrs:
            addr = ipaddress.ip_address(addr.get('address'))
            if include_loopback or not addr.is_loopback:
                ret.add(addr)
    return [six.text_type(addr) for addr in sorted(ret)]


def ip_addrs(interface=None, include_loopback=False, interface_data=None):
    '''
    Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
    ignored, unless 'include_loopback=True' is indicated. If 'interface' is
    provided, then only IP addresses from that interface will be returned.
    '''
    return _ip_addrs(interface, include_loopback, interface_data, 'inet')


def ip_addrs6(interface=None, include_loopback=False, interface_data=None):
    '''
    Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
    unless 'include_loopback=True' is indicated. If 'interface' is provided,
    then only IP addresses from that interface will be returned.
    '''
    return _ip_addrs(interface, include_loopback, interface_data, 'inet6')
def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>
    '''
    if len(hex_ip) == 32:  # ipv6
        pieces = []
        for idx in range(0, 32, 8):
            # Each 8-hex-digit chunk is two 16-bit words of four hex chars;
            # /proc stores them in host byte order, hence the invert option.
            words = [hex_ip[idx + off:idx + off + 2] for off in range(0, 8, 2)]
            if invert:
                pieces.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(words))
            else:
                pieces.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(words))
        try:
            address = ipaddress.IPv6Address(":".join(pieces))
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip
        if address.ipv4_mapped:
            return str(address.ipv4_mapped)
        return address.compressed

    try:
        numeric = int(hex_ip, 16)
    except ValueError:
        return hex_ip
    octets = (numeric >> 24 & 255, numeric >> 16 & 255,
              numeric >> 8 & 255, numeric & 255)
    if invert:
        octets = octets[::-1]
    return '{0}.{1}.{2}.{3}'.format(*octets)


def mac2eui64(mac, prefix=None):
    '''
    Convert a MAC address to a EUI64 identifier
    or, with prefix provided, a full IPv6 address
    '''
    # http://tools.ietf.org/html/rfc4291#section-2.5.1
    eui64 = re.sub(r'[.:-]', '', mac).lower()
    eui64 = eui64[0:6] + 'fffe' + eui64[6:]
    # Flip the universal/local bit of the first octet.
    eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]

    if prefix is None:
        return ':'.join(re.findall(r'.{4}', eui64))

    try:
        net = ipaddress.ip_network(prefix, strict=False)
        euil = int('0x{0}'.format(eui64), 16)
        return '{0}/{1}'.format(net[euil], net.prefixlen)
    except Exception:
        return


def active_tcp():
    '''
    Return a dict describing all active tcp connections as quickly as possible
    '''
    ret = {}
    for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
        if not os.path.isfile(statf):
            continue
        with salt.utils.files.fopen(statf, 'rb') as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                if line.strip().startswith('sl'):
                    continue
                iret = _parse_tcp_line(line)
                sl = next(iter(iret))
                if iret[sl]['state'] == 1:  # 1 is ESTABLISHED
                    del iret[sl]['state']
                    ret[len(ret)] = iret[sl]
    return ret
def local_port_tcp(port):
    '''
    Return a set of remote ip addrs attached to the specified local port
    '''
    return _remotes_on(port, 'local_port')


def remote_port_tcp(port):
    '''
    Return a set of ip addrs the current host is connected to on given port
    '''
    return _remotes_on(port, 'remote_port')


def _remotes_on(port, which_end):
    '''
    Return a set of ip addrs active tcp connections
    '''
    port = int(port)

    # Fast path: ask the netlink tooling (`ss`) first.
    ret = _netlink_tool_remote_on(port, which_end)
    if ret is not None:
        return ret

    ret = set()
    proc_available = False
    for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
        if not os.path.isfile(statf):
            continue
        proc_available = True
        with salt.utils.files.fopen(statf, 'r') as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                if line.strip().startswith('sl'):
                    continue
                iret = _parse_tcp_line(line)
                sl = next(iter(iret))
                if iret[sl][which_end] == port and iret[sl]['state'] == 1:  # 1 is ESTABLISHED
                    ret.add(iret[sl]['remote_addr'])

    if not proc_available:
        # No procfs: fall back to the OS-specific parsing helpers.
        if salt.utils.platform.is_sunos():
            return _sunos_remotes_on(port, which_end)
        if salt.utils.platform.is_freebsd():
            return _freebsd_remotes_on(port, which_end)
        if salt.utils.platform.is_netbsd():
            return _netbsd_remotes_on(port, which_end)
        if salt.utils.platform.is_openbsd():
            return _openbsd_remotes_on(port, which_end)
        if salt.utils.platform.is_windows():
            return _windows_remotes_on(port, which_end)
        if salt.utils.platform.is_aix():
            return _aix_remotes_on(port, which_end)
        return _linux_remotes_on(port, which_end)

    return ret


def _parse_tcp_line(line):
    '''
    Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6
    '''
    comps = line.strip().split()
    slot = comps[0].rstrip(':')
    l_addr, l_port = comps[1].split(':')
    r_addr, r_port = comps[2].split(':')
    # Addresses and ports are hex; addresses are additionally byte-swapped.
    return {
        slot: {
            'local_addr': hex2ip(l_addr, True),
            'local_port': int(l_port, 16),
            'remote_addr': hex2ip(r_addr, True),
            'remote_port': int(r_port, 16),
            'state': int(comps[3], 16),
        }
    }
def _netlink_tool_remote_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port, parsed from the output of `ss -ant`.
    Returns None when `ss` is unavailable or its output had no header.
    '''
    remotes = set()
    valid = False
    try:
        data = subprocess.check_output(['ss', '-ant'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed ss')
        raise
    except OSError:     # not command "No such file or directory"
        return None

    for line in salt.utils.stringutils.to_str(data).split('\n'):
        if 'Address:Port' in line:    # ss tools may not be valid
            valid = True
            continue
        if 'ESTAB' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[3].split(':', 1)
        remote_host, remote_port = chunks[4].split(':', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)

    if valid is False:
        remotes = None
    return remotes


def _sunos_remotes_on(port, which_end):
    '''
    SunOS specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port, parsed from `netstat -f inet -n` output
    where entries look like `10.0.0.101.4505  10.0.0.1.45329 ... ESTABLISHED`.
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-f', 'inet', '-n'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    for line in salt.utils.stringutils.to_str(data).split('\n'):
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        # SunOS joins address and port with a '.', so split on the last one.
        local_host, local_port = chunks[0].rsplit('.', 1)
        remote_host, remote_port = chunks[1].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
def _freebsd_remotes_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'sockstat' (FreeBSD), e.g.
    `sockstat -4 -c -p 4506`::

        USER    COMMAND     PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
        root    python2.7   1294  41 tcp4   127.0.0.1:61115  127.0.0.1:4506
    '''
    port = int(port)
    remotes = set()

    try:
        cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port))
        data = subprocess.check_output(cmd)  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError as ex:
        log.error('Failed "sockstat" with returncode = %s', ex.returncode)
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')

    for line in lines:
        chunks = line.split()
        if not chunks:
            continue
        # Bug fix: guard the length *before* indexing chunks[1], otherwise a
        # single-token line raised IndexError.
        if len(chunks) < 2:
            continue
        if 'COMMAND' in chunks[1]:
            continue  # ignore header
        # sockstat -4 -c -p 4506 does this with high PIDs:
        # USER     COMMAND    PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
        # salt-master python2.781106 35 tcp4  192.168.12.34:4506  192.168.12.45:60143
        local = chunks[-2]
        remote = chunks[-1]
        lhost, lport = local.split(':')
        rhost, rport = remote.split(':')
        # Bug fix: callers (_remotes_on) pass 'local_port'/'remote_port';
        # the previous comparisons against 'local'/'remote' never matched,
        # so the port-end filter was silently disabled.
        if which_end == 'local_port' and int(lport) != port:
            continue  # ignore if local port not port
        if which_end == 'remote_port' and int(rport) != port:
            continue  # ignore if remote port not port
        remotes.add(rhost)

    return remotes
def _netbsd_remotes_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'sockstat' (NetBSD), e.g.
    `sockstat -4 -c -n -p 4506`::

        USER    COMMAND     PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
        root    python2.7   1294  41 tcp    127.0.0.1.61115  127.0.0.1.4506
    '''
    port = int(port)
    remotes = set()

    try:
        cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port))
        data = subprocess.check_output(cmd)  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError as ex:
        log.error('Failed "sockstat" with returncode = %s', ex.returncode)
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')

    for line in lines:
        chunks = line.split()
        if not chunks:
            continue
        # Bug fix: guard the length *before* indexing chunks[1], otherwise a
        # single-token line raised IndexError.
        if len(chunks) < 2:
            continue
        if 'COMMAND' in chunks[1]:
            continue  # ignore header

        # NetBSD joins address and port with a '.', so pop the last piece.
        local = chunks[5].split('.')
        lport = local.pop()
        lhost = '.'.join(local)
        remote = chunks[6].split('.')
        rport = remote.pop()
        rhost = '.'.join(remote)

        # Bug fix: callers (_remotes_on) pass 'local_port'/'remote_port';
        # the previous comparisons against 'local'/'remote' never matched,
        # so the port-end filter was silently disabled.
        if which_end == 'local_port' and int(lport) != port:
            continue  # ignore if local port not port
        if which_end == 'remote_port' and int(rport) != port:
            continue  # ignore if remote port not port
        remotes.add(rhost)

    return remotes
def _openbsd_remotes_on(port, which_end):
    '''
    OpenBSD specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat', e.g. `netstat -nf inet`::

        Proto   Recv-Q Send-Q  Local Address    Foreign Address  (state)
        tcp          0      0  10.0.0.101.4505  10.0.0.1.45329   ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-nf', 'inet'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    # Bug fix: check_output() returns bytes on Python 3, so split('\n') with
    # a str separator raised TypeError; decode first, as the sibling
    # platform helpers in this module already do.
    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        # OpenBSD joins address and port with a '.', so split on the last one.
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
get_socket
python
def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto)
Return a socket object for the addr IP-version agnostic
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L186-L197
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). 
:param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
get_fqhostname
python
def get_fqhostname():
    '''
    Return the fully qualified hostname of this machine.

    Prefers the canonical name reported by ``socket.getaddrinfo()``; when no
    canonical name can be resolved, falls back to ``socket.getfqdn()``.
    '''
    hostname = None
    try:
        results = socket.getaddrinfo(
            socket.gethostname(),
            0,
            socket.AF_UNSPEC,
            socket.SOCK_STREAM,
            socket.SOL_TCP,
            socket.AI_CANONNAME
        )
        # Each result is (family, socktype, proto, canonname, sockaddr).
        # On Windows `canonname` can be an empty string, which would make
        # this function return `None`, so only accept truthy canonnames.
        hostname = next(
            (entry[3] for entry in results if len(entry) > 3 and entry[3]),
            None
        )
    except socket.gaierror:
        pass  # NOTE: this used to log.error() but it was later disabled
    except socket.error as err:
        log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err)
    return hostname if hostname is not None else socket.getfqdn()
Returns the fully qualified hostname
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L200-L224
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
def _parse_tcp_line(line):
    '''
    Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6

    Returns a dict keyed by the socket slot number (the 'sl' column) whose
    value holds the decoded local/remote address and port plus the numeric
    TCP state.
    '''
    ret = {}
    comps = line.strip().split()
    sl = comps[0].rstrip(':')
    ret[sl] = {}
    l_addr, l_port = comps[1].split(':')
    r_addr, r_port = comps[2].split(':')
    # /proc/net/tcp* encodes addresses as little-endian hex, hence invert=True
    ret[sl]['local_addr'] = hex2ip(l_addr, True)
    ret[sl]['local_port'] = int(l_port, 16)
    ret[sl]['remote_addr'] = hex2ip(r_addr, True)
    ret[sl]['remote_port'] = int(r_port, 16)
    ret[sl]['state'] = int(comps[3], 16)
    return ret


def _netlink_tool_remote_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'ss' to get connections

    [root@salt-master ~]# ss -ant
    State      Recv-Q Send-Q      Local Address:Port        Peer Address:Port
    LISTEN     0      511                     *:80                     *:*
    LISTEN     0      128                     *:22                     *:*
    ESTAB      0      0             127.0.0.1:56726        127.0.0.1:4505

    Returns None when the 'ss' tool is not available so callers can fall
    back to another discovery method.
    '''
    remotes = set()
    valid = False
    try:
        data = subprocess.check_output(['ss', '-ant'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed ss')
        raise
    except OSError:     # not command "No such file or directory"
        return None

    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'Address:Port' in line:    # ss tools may not be valid
            valid = True
            continue
        elif 'ESTAB' not in line:
            continue
        chunks = line.split()
        # Split on the LAST colon so colon-bearing IPv6 addresses such as
        # '[::1]:4505' yield a numeric port instead of raising ValueError
        # in the int() comparisons below.
        local_host, local_port = chunks[3].rsplit(':', 1)
        remote_host, remote_port = chunks[4].rsplit(':', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        # Strip IPv6 brackets for consistency with _linux_remotes_on()
        remotes.add(remote_host.strip('[]'))

    if valid is False:
        remotes = None
    return remotes
def _sunos_remotes_on(port, which_end):
    '''
    SunOS specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    [root@salt-master ~]# netstat -f inet -n
    TCP: IPv4
       Local Address        Remote Address      Swind Send-Q Rwind Recv-Q    State
       -------------------- -------------------- ----- ------ ----- ------ -----------
       10.0.0.101.4505      10.0.0.1.45329      1064800      0 1055864      0 ESTABLISHED
       10.0.0.101.4505      10.0.0.100.50798    1064800      0 1055864      0 ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-f', 'inet', '-n'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        # Solaris netstat separates the address and port with a dot
        local_host, local_port = chunks[0].rsplit('.', 1)
        remote_host, remote_port = chunks[1].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes


def _freebsd_remotes_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'sockstat' (FreeBSD)
    to get connections

    $ sudo sockstat -4
    USER    COMMAND    PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1456 29 tcp4   *:4505           *:*
    root    python2.7   1445 17 tcp4   *:4506           *:*
    root    python2.7   1294 14 tcp4   127.0.0.1:11813  127.0.0.1:4505
    root    python2.7   1294 41 tcp4   127.0.0.1:61115  127.0.0.1:4506

    $ sudo sockstat -4 -c -p 4506
    USER    COMMAND    PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1294 41 tcp4   127.0.0.1:61115  127.0.0.1:4506
    '''
    port = int(port)
    remotes = set()

    try:
        cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port))
        data = subprocess.check_output(cmd)  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError as ex:
        log.error('Failed "sockstat" with returncode = %s', ex.returncode)
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        chunks = line.split()
        if not chunks:
            continue
        # ['root', 'python2.7', '1456', '37', 'tcp4',
        #  '127.0.0.1:4505-', '127.0.0.1:55703']
        if 'COMMAND' in chunks[1]:
            continue  # ignore header
        if len(chunks) < 2:
            continue
        # sockstat -4 -c -p 4506 does this with high PIDs:
        # USER     COMMAND    PID   FD PROTO  LOCAL ADDRESS         FOREIGN ADDRESS
        # salt-master python2.781106 35 tcp4  192.168.12.34:4506    192.168.12.45:60143
        local = chunks[-2]
        remote = chunks[-1]
        lhost, lport = local.split(':')
        rhost, rport = remote.split(':')

        # Compare against the which_end tokens that _remotes_on() actually
        # passes ('local_port'/'remote_port'); the previous 'local'/'remote'
        # comparisons could never match, so no direction filtering happened.
        if which_end == 'local_port' and int(lport) != port:  # ignore if local port not port
            continue
        if which_end == 'remote_port' and int(rport) != port:  # ignore if remote port not port
            continue

        remotes.add(rhost)

    return remotes


def _netbsd_remotes_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'sockstat' (NetBSD)
    to get connections

    $ sudo sockstat -4 -n
    USER    COMMAND    PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1456 29 tcp    *.4505           *.*
    root    python2.7   1445 17 tcp    *.4506           *.*
    root    python2.7   1294 14 tcp    127.0.0.1.11813  127.0.0.1.4505
    root    python2.7   1294 41 tcp    127.0.0.1.61115  127.0.0.1.4506

    $ sudo sockstat -4 -c -n -p 4506
    USER    COMMAND    PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1294 41 tcp    127.0.0.1.61115  127.0.0.1.4506
    '''
    port = int(port)
    remotes = set()

    try:
        cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port))
        data = subprocess.check_output(cmd)  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError as ex:
        log.error('Failed "sockstat" with returncode = %s', ex.returncode)
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        chunks = line.split()
        if not chunks:
            continue
        if 'COMMAND' in chunks[1]:
            continue  # ignore header
        if len(chunks) < 2:
            continue
        # NetBSD sockstat separates address and port with a dot; pop the
        # final component off as the port and rejoin the rest as the host.
        local = chunks[5].split('.')
        lport = local.pop()
        lhost = '.'.join(local)
        remote = chunks[6].split('.')
        rport = remote.pop()
        rhost = '.'.join(remote)

        # Same which_end fix as _freebsd_remotes_on: match the tokens
        # _remotes_on() passes ('local_port'/'remote_port').
        if which_end == 'local_port' and int(lport) != port:
            continue
        if which_end == 'remote_port' and int(rport) != port:
            continue

        remotes.add(rhost)

    return remotes
def _openbsd_remotes_on(port, which_end):
    '''
    OpenBSD specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    $ netstat -nf inet
    Active Internet connections
    Proto   Recv-Q Send-Q  Local Address          Foreign Address        (state)
    tcp          0      0  10.0.0.101.4505        10.0.0.1.45329         ESTABLISHED
    tcp          0      0  10.0.0.101.4505        10.0.0.100.50798       ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-nf', 'inet'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    # check_output() returns bytes on Python 3; decode before splitting,
    # matching every other *_remotes_on() helper in this module.
    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes


def _windows_remotes_on(port, which_end):
    r'''
    Windows specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    C:\>netstat -n

    Active Connections

       Proto  Local Address          Foreign Address        State
       TCP    10.2.33.17:3007        130.164.12.233:10123   ESTABLISHED
       TCP    10.2.33.17:3389        130.164.30.5:10378     ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-n'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[1].rsplit(':', 1)
        remote_host, remote_port = chunks[2].rsplit(':', 1)
        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
def _linux_remotes_on(port, which_end):
    '''
    Linux specific helper function.
    Returns set of ip host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'lsof'
    to get connections

    $ sudo lsof -iTCP:4505 -n
    COMMAND   PID USER   FD   TYPE             DEVICE SIZE/OFF NODE NAME
    Python   9971 root   35u  IPv4 0x18a8464a29ca329d      0t0  TCP *:4505 (LISTEN)
    Python   9971 root   37u  IPv4 0x18a8464a29b2b29d      0t0  TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED)
    Python  10152 root   22u  IPv4 0x18a8464a29c8cab5      0t0  TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED)
    Python  10153 root   22u  IPv4 0x18a8464a29c8cab5      0t0  TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED)
    '''
    remotes = set()

    try:
        data = subprocess.check_output(
            ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P']  # pylint: disable=minimum-python-version
        )
    except subprocess.CalledProcessError as ex:
        if ex.returncode == 1:
            # Lsof return 1 if any error was detected, including the failure
            # to locate Internet addresses, and it is not an error in this case.
            log.warning('"lsof" returncode = 1, likely no active TCP sessions.')
            return remotes
        log.error('Failed "lsof" with returncode = %s', ex.returncode)
        raise

    for row in salt.utils.stringutils.to_str(data).split('\n'):
        fields = row.split()
        if not fields:
            continue
        # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0',
        #  'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)']
        if 'COMMAND' in fields[0]:
            continue  # ignore header
        if 'ESTABLISHED' not in fields[-1]:
            continue  # ignore if not ESTABLISHED
        # Field 8 looks like '127.0.0.1:4505->127.0.0.1:55703'
        local_end, remote_end = fields[8].split('->')
        _, local_port = local_end.rsplit(':', 1)
        remote_host, remote_port = remote_end.rsplit(':', 1)
        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        # Drop the brackets lsof puts around IPv6 addresses
        remotes.add(remote_host.strip("[]"))

    return remotes


def _aix_remotes_on(port, which_end):
    '''
    AIX specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n
    Active Internet connections
    Proto Recv-Q Send-Q  Local Address          Foreign Address        (state)
    tcp4       0      0  172.29.149.95.50093    209.41.78.13.4505      ESTABLISHED
    tcp4       0      0  127.0.0.1.9514         *.*                    LISTEN
    tcp4       0      0  127.0.0.1.9515         *.*                    LISTEN
    tcp4       0      0  127.0.0.1.199          127.0.0.1.32779        ESTABLISHED
    tcp4       0      0  127.0.0.1.32779        127.0.0.1.199          ESTABLISHED
    tcp4       0     40  172.29.149.95.22       172.29.96.83.41022     ESTABLISHED
    tcp4       0      0  172.29.149.95.22       172.29.96.83.41032     ESTABLISHED
    tcp4       0      0  127.0.0.1.32771        127.0.0.1.32775        ESTABLISHED
    tcp        0      0  127.0.0.1.32775        127.0.0.1.32771        ESTABLISHED
    tcp4       0      0  127.0.0.1.32771        127.0.0.1.32776        ESTABLISHED
    tcp        0      0  127.0.0.1.32776        127.0.0.1.32771        ESTABLISHED
    tcp4       0      0  127.0.0.1.32771        127.0.0.1.32777        ESTABLISHED
    tcp        0      0  127.0.0.1.32777        127.0.0.1.32771        ESTABLISHED
    tcp4       0      0  127.0.0.1.32771        127.0.0.1.32778        ESTABLISHED
    tcp        0      0  127.0.0.1.32778        127.0.0.1.32771        ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-f', 'inet', '-n'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    for row in salt.utils.stringutils.to_str(data).split('\n'):
        if 'ESTABLISHED' not in row:
            continue
        fields = row.split()
        # AIX netstat separates address and port with a dot
        local_host, local_port = fields[3].rsplit('.', 1)
        remote_host, remote_port = fields[4].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
@jinja_filter('gen_mac')
def gen_mac(prefix='AC:DE:48'):
    '''
    Generates a MAC address with the defined OUI prefix.

    Common prefixes:

     - ``00:16:3E`` -- Xen
     - ``00:18:51`` -- OpenVZ
     - ``00:50:56`` -- VMware (manually generated)
     - ``52:54:00`` -- QEMU/KVM
     - ``AC:DE:48`` -- PRIVATE

    References:

     - http://standards.ieee.org/develop/regauth/oui/oui.txt
     - https://www.wireshark.org/tools/oui-lookup.html
     - https://en.wikipedia.org/wiki/MAC_address
    '''
    # Three random octets appended to the caller-supplied OUI
    octets = tuple(random.randint(0, 0xff) for _ in range(3))
    return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, *octets)


@jinja_filter('mac_str_to_bytes')
def mac_str_to_bytes(mac_str):
    '''
    Convert a MAC address string into bytes. Works with or without separators:

    b1 = mac_str_to_bytes('08:00:27:13:69:77')
    b2 = mac_str_to_bytes('080027136977')
    assert b1 == b2
    assert isinstance(b1, bytes)
    '''
    if len(mac_str) == 17:
        # Strip whichever single-character separator is in use (':' or '-')
        mac_str = mac_str.replace(mac_str[2], '')
    elif len(mac_str) != 12:
        raise ValueError('Invalid MAC address')
    octets = (int(mac_str[pos:pos + 2], 16) for pos in range(0, 12, 2))
    return bytes(octets) if six.PY3 else b''.join(chr(value) for value in octets)


def refresh_dns():
    '''
    issue #21397: force glibc to re-read resolv.conf
    '''
    try:
        res_init()
    except NameError:
        # Exception raised loading the library, thus res_init is not defined
        pass


@jinja_filter('connection_check')
def connection_check(addr, port=80, safe=False, ipv6=None):
    '''
    Provides a convenient alias for the dns_check filter.
    '''
    return dns_check(addr, port, safe, ipv6)


@jinja_filter('dns_check')
def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True):
    '''
    Return the ip resolved by dns, but do not exit on failure, only raise an
    exception. Obeys system preference for IPv4/6 address resolution - this
    can be overridden by the ipv6 flag. Tries to connect to the address before
    considering it useful. If no address can be reached, the first one resolved
    is used as a fallback.
    '''
    error = False
    lookup = addr
    seen_ipv6 = False  # retained from the original implementation (unused)
    family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC
    # Shared message for every TypeError path below
    resolve_err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup)

    hostnames = []
    try:
        refresh_dns()
        hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM)
    except TypeError:
        raise SaltSystemExit(code=42, msg=resolve_err)
    except socket.error:
        error = True

    # If ipv6 is set to True, attempt another lookup using the IPv4 family,
    # just in case we're attempting to lookup an IPv4 IP
    # as an IPv6 hostname.
    if error and ipv6:
        try:
            refresh_dns()
            hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM)
        except TypeError:
            raise SaltSystemExit(code=42, msg=resolve_err)
        except socket.error:
            error = True

    try:
        if not hostnames:
            error = True
        else:
            resolved = False
            candidates = []
            for entry in hostnames:
                # Input is IP address, passed through unchanged, just return it
                if entry[4][0] == addr:
                    resolved = salt.utils.zeromq.ip_bracket(addr)
                    break

                candidate_addr = salt.utils.zeromq.ip_bracket(entry[4][0])

                # sometimes /etc/hosts contains ::1 localhost
                if not ipv6 and candidate_addr == '[::1]':
                    continue

                candidates.append(candidate_addr)

                if attempt_connect:
                    try:
                        sock = socket.socket(entry[0], socket.SOCK_STREAM)
                        sock.settimeout(2)
                        sock.connect((candidate_addr.strip('[]'), entry[4][1]))
                        sock.close()

                        resolved = candidate_addr
                        break
                    except socket.error:
                        pass
            if not resolved:
                if candidates:
                    # Fall back to the first resolved address
                    resolved = candidates[0]
                else:
                    error = True
    except TypeError:
        raise SaltSystemExit(code=42, msg=resolve_err)
    except socket.error:
        error = True

    if error:
        err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr)
        if safe:
            if salt.log.is_console_configured():
                # If logging is not configured it also means that either
                # the master or minion instance calling this hasn't even
                # started running
                log.error(err)
            raise SaltClientError()
        raise SaltSystemExit(code=42, msg=err)

    return resolved
def parse_host_port(host_port):
    """
    Takes a string argument specifying host or host:port.

    Returns a (hostname, port) or (ip_address, port) tuple. If no port is given,
    the second (port) element of the returned tuple will be None.

    host:port argument, for example, is accepted in the forms of:
      - hostname
      - hostname:1234
      - hostname.domain.tld
      - hostname.domain.tld:5678
      - [1234::5]:5678
      - 1234::5
      - 10.11.12.13:4567
      - 10.11.12.13

    Raises ValueError for a bad hostname or an ambiguous bracketed form.
    """
    host, port = None, None  # default

    _s_ = host_port[:]
    if _s_[0] == "[":
        if "]" in host_port:
            host, _s_ = _s_.lstrip("[").rsplit("]", 1)
            host = ipaddress.IPv6Address(host).compressed
            # Guard against a bare bracketed address such as '[1234::5]',
            # which leaves no remainder after the closing bracket and would
            # otherwise raise IndexError on _s_[0].
            if _s_ and _s_[0] == ":":
                port = int(_s_.lstrip(":"))
            else:
                if len(_s_) > 1:
                    raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port))
    else:
        if _s_.count(":") == 1:
            host, _hostport_separator_, port = _s_.partition(":")
            try:
                port = int(port)
            except ValueError as _e_:
                log.error('host_port "%s" port value "%s" is not an integer.', host_port, port)
                raise _e_
        else:
            host = _s_
    try:
        # Normalize IP literals to their compressed form; hostnames fall
        # through to the ValueError branch below.
        if not isinstance(host, ipaddress._BaseAddress):
            host_ip = ipaddress.ip_address(host).compressed
            host = host_ip
    except ValueError:
        log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
        if host != sanitize_host(host):
            log.error('bad hostname: "%s"', host)
            raise ValueError('bad hostname: "{}"'.format(host))

    return host, port


def is_fqdn(hostname):
    """
    Verify if hostname conforms to be a FQDN.

    :param hostname: text string with the name of the host
    :return: bool, True if hostname is correct FQDN, False otherwise
    """
    # Each label: 1-63 chars of letters/digits/hyphen/underscore, with no
    # leading or trailing hyphen; total length must stay below 255.
    compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
    return ("." in hostname
            and len(hostname) < 0xff
            and all(compliant.match(x) for x in hostname.rstrip(".").split(".")))
saltstack/salt
salt/utils/network.py
ip_to_host
python
def ip_to_host(ip):
    '''
    Resolve an IP address to its primary hostname via reverse DNS.

    Returns the hostname string on success, or None when the lookup fails
    for any reason (the failure is only logged at debug level).
    '''
    try:
        # gethostbyaddr() -> (hostname, aliaslist, ipaddrlist); only the
        # primary name is of interest here.
        resolved = socket.gethostbyaddr(ip)[0]
    except Exception as exc:  # best-effort: any resolver failure yields None
        log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc)
        resolved = None
    return resolved
Returns the hostname of a given IP
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L227-L236
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host 
(IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
is_reachable_host
python
def is_reachable_host(entity_name):
    '''
    Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc).

    :param entity_name: IP address, FQDN or hostname to check
    :return: True if the name resolves via ``socket.getaddrinfo``, False otherwise
    '''
    try:
        # getaddrinfo raises socket.gaierror when the name cannot be
        # resolved; a successful call is all we need to know the entity
        # is addressable. (The previous ``assert type(...) == list``
        # check was a no-op under ``python -O`` and added nothing.)
        socket.getaddrinfo(entity_name, 0, 0, 0, 0)
        ret = True
    except socket.gaierror:
        ret = False
    return ret
Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param entity_name: :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L241-L253
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
is_ip_filter
python
def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options)
Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L311-L315
null
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def 
_ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. 
''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
def _netlink_tool_remote_on(port, which_end):
    '''
    Returns the set of host addresses of remote ESTABLISHED tcp
    connections on a local or remote tcp port, or ``None`` when the
    ``ss`` tool is unavailable or produced unrecognizable output.

    Parses output of shell 'ss' to get connections

    [root@salt-master ~]# ss -ant
    State      Recv-Q Send-Q               Local Address:Port                 Peer Address:Port
    LISTEN     0      511                              *:80                              *:*
    LISTEN     0      128                              *:22                              *:*
    ESTAB      0      0                        127.0.0.1:56726                  127.0.0.1:4505
    '''
    remotes = set()
    header_seen = False  # proves the output really came from ss
    try:
        output = subprocess.check_output(['ss', '-ant'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed ss')
        raise
    except OSError:
        # command not found: "No such file or directory"
        return None

    for row in salt.utils.stringutils.to_str(output).split('\n'):
        if 'Address:Port' in row:
            # header line: output format is the one we expect
            header_seen = True
            continue
        if 'ESTAB' not in row:
            continue
        fields = row.split()
        local_host, local_port = fields[3].split(':', 1)
        remote_host, remote_port = fields[4].split(':', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)

    if not header_seen:
        return None
    return remotes
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
def _openbsd_remotes_on(port, which_end):
    '''
    OpenBSD specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    $ netstat -nf inet
    Active Internet connections
    Proto   Recv-Q Send-Q  Local Address          Foreign Address        (state)
    tcp          0      0  10.0.0.101.4505        10.0.0.1.45329         ESTABLISHED
    tcp          0      0  10.0.0.101.4505        10.0.0.100.50798       ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-nf', 'inet'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    # BUGFIX: check_output() returns bytes on Python 3; decode before
    # splitting, consistent with the other *_remotes_on helpers
    # (splitting bytes with a str separator raises TypeError).
    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes


def _windows_remotes_on(port, which_end):
    r'''
    Windows specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    C:\>netstat -n

    Active Connections

       Proto  Local Address          Foreign Address        State
       TCP    10.2.33.17:3007        130.164.12.233:10123   ESTABLISHED
       TCP    10.2.33.17:3389        130.164.30.5:10378     ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-n'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        # Windows uses host:port; split from the right so IPv6-style
        # colons in the host part are not mistaken for the separator.
        local_host, local_port = chunks[1].rsplit(':', 1)
        remote_host, remote_port = chunks[2].rsplit(':', 1)
        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
@jinja_filter('mac_str_to_bytes')
def mac_str_to_bytes(mac_str):
    '''
    Convert a MAC address string into bytes. Works with or without separators:

        b1 = mac_str_to_bytes('08:00:27:13:69:77')
        b2 = mac_str_to_bytes('080027136977')
        assert b1 == b2
        assert isinstance(b1, bytes)
    '''
    if len(mac_str) == 17:
        # Separated form: drop whichever separator character is in use
        # (position 2 always holds it, e.g. ':' or '-').
        mac_str = mac_str.replace(mac_str[2], '')
    elif len(mac_str) != 12:
        raise ValueError('Invalid MAC address')
    octets = (int(mac_str[pos:pos + 2], 16) for pos in range(0, 12, 2))
    if six.PY3:
        return bytes(octets)
    return b''.join(chr(value) for value in octets)


def refresh_dns():
    '''
    issue #21397: force glibc to re-read resolv.conf
    '''
    try:
        res_init()
    except NameError:
        # res_init stays undefined when loading libc failed at import time
        pass


@jinja_filter('connection_check')
def connection_check(addr, port=80, safe=False, ipv6=None):
    '''
    Provides a convenient alias for the dns_check filter.
    '''
    return dns_check(addr, port, safe, ipv6)
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
def parse_host_port(host_port):
    """
    Takes a string argument specifying host or host:port.

    Returns a (hostname, port) or (ip_address, port) tuple. If no port is
    given, the second (port) element of the returned tuple will be None.

    host:port argument, for example, is accepted in the forms of:
      - hostname
      - hostname:1234
      - hostname.domain.tld
      - hostname.domain.tld:5678
      - [1234::5]:5678
      - 1234::5
      - 10.11.12.13:4567
      - 10.11.12.13

    Raises ValueError for malformed or ambiguous input.
    """
    host, port = None, None  # default

    _s_ = host_port[:]
    if _s_[0] == "[":
        # Bracketed IPv6 literal, optionally followed by ":port".
        # BUGFIX: raise a clean ValueError for an unmatched "[" instead of
        # falling through with host=None and crashing later.
        if "]" not in host_port:
            raise ValueError('found unmatched "[" in "{}"'.format(host_port))
        host, _s_ = _s_.lstrip("[").rsplit("]", 1)
        host = ipaddress.IPv6Address(host).compressed
        # BUGFIX: a bare "[addr]" with no trailing port left _s_ empty and
        # the original _s_[0] check raised IndexError; treat it as port=None.
        if _s_ and _s_[0] == ":":
            port = int(_s_.lstrip(":"))
        elif len(_s_) > 1:
            raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port))
    else:
        if _s_.count(":") == 1:
            host, _hostport_separator_, port = _s_.partition(":")
            try:
                port = int(port)
            except ValueError as _e_:
                log.error('host_port "%s" port value "%s" is not an integer.', host_port, port)
                raise _e_
        else:
            # zero or 2+ colons: whole string is the host (bare IPv6 or name)
            host = _s_
    try:
        if not isinstance(host, ipaddress._BaseAddress):
            host_ip = ipaddress.ip_address(host).compressed
            host = host_ip
    except ValueError:
        log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
        if host != sanitize_host(host):
            log.error('bad hostname: "%s"', host)
            raise ValueError('bad hostname: "{}"'.format(host))

    return host, port


def is_fqdn(hostname):
    """
    Verify if hostname conforms to be a FQDN.

    :param hostname: text string with the name of the host
    :return: bool, True if hostname is correct FQDN, False otherwise
    """
    # Each dot-separated label: 1-63 chars of [A-Za-z0-9-_], no leading
    # or trailing hyphen; whole name must contain a dot and fit 254 chars.
    compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
    return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
is_ipv4_filter
python
def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types)
Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L414-L428
[ "def _is_ipv(ip, version, options=None):\n\n if not version:\n version = 4\n\n if version not in (4, 6):\n return None\n\n try:\n ip_obj = ipaddress.ip_address(ip)\n except ValueError:\n # maybe it is an IP network\n try:\n ip_obj = ipaddress.ip_interface(ip)\n except ValueError:\n # nope, still not :(\n return None\n\n if not ip_obj.version == version:\n return None\n\n # has the right version, let's move on\n return _ip_options(ip_obj, version, options=options)\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
def cidr_to_ipv4_netmask(cidr_bits):
    '''
    Returns an IPv4 netmask

    Converts a prefix length (1-32, int or numeric string) into a
    dotted-quad netmask; anything out of range or non-numeric yields
    an empty string.
    '''
    try:
        remaining = int(cidr_bits)
    except ValueError:
        return ''
    if remaining < 1 or remaining > 32:
        return ''
    octets = []
    for _ in range(4):
        # consume up to 8 mask bits for this octet
        take = min(remaining, 8)
        octets.append(str(256 - (2 ** (8 - take))))
        remaining -= take
    return '.'.join(octets)
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
def get_net_size(mask):
    '''
    Turns an IPv4 netmask into it's corresponding prefix length
    (255.255.255.0 -> 24 as in 192.168.1.10/24).
    '''
    bits = ''.join(bin(int(octet))[2:].zfill(8) for octet in mask.split('.'))
    # prefix length == position of the last set bit
    return len(bits.rstrip('0'))
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
def hw_addr(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface

    .. versionchanged:: 2016.11.4
        Added support for AIX

    iface
        Name of the interface to look up.

    Returns the MAC address string on success, otherwise an error message.
    '''
    if salt.utils.platform.is_aix():
        # fixed: the helper was returned as a function object instead of
        # being called with the interface name
        return _hw_addr_aix(iface)

    iface_info, error = _get_iface_info(iface)
    if error is False:
        return iface_info.get(iface, {}).get('hwaddr', '')
    return error
def in_subnet(cidr, addr=None):
    '''
    Returns True if host or (any of) addrs is within specified subnet, otherwise False
    '''
    try:
        network = ipaddress.ip_network(cidr)
    except ValueError:
        log.error('Invalid CIDR \'%s\'', cidr)
        return False

    if addr is None:
        # no addresses supplied: test every address assigned to this host
        addr = ip_addrs()
        addr.extend(ip_addrs6())
    elif not isinstance(addr, (list, tuple)):
        addr = (addr,)

    for candidate in addr:
        if ipaddress.ip_address(candidate) in network:
            return True
    return False
def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>
    '''
    if len(hex_ip) == 32:
        # IPv6: eight 16-bit groups, four hex digits each
        groups = []
        for off in range(0, 32, 8):
            quad = [hex_ip[off + x:off + x + 2] for x in range(0, 8, 2)]
            if invert:
                # /proc stores each 32-bit word little-endian
                groups.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(quad))
            else:
                groups.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(quad))
        try:
            address = ipaddress.IPv6Address(":".join(groups))
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip
        if address.ipv4_mapped:
            return str(address.ipv4_mapped)
        return address.compressed

    try:
        hip = int(hex_ip, 16)
    except ValueError:
        return hex_ip
    octets = (hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255)
    if invert:
        # /proc stores IPv4 addresses little-endian as well
        octets = octets[::-1]
    return '{0}.{1}.{2}.{3}'.format(*octets)
def _parse_tcp_line(line):
    '''
    Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6
    '''
    fields = line.strip().split()
    slot = fields[0].rstrip(':')
    local_addr, local_port = fields[1].split(':')
    remote_addr, remote_port = fields[2].split(':')
    # addresses are little-endian hex (hence invert=True); ports and the
    # socket state field are plain hex
    entry = {
        'local_addr': hex2ip(local_addr, True),
        'local_port': int(local_port, 16),
        'remote_addr': hex2ip(remote_addr, True),
        'remote_port': int(remote_port, 16),
        'state': int(fields[3], 16),
    }
    return {slot: entry}
def _netlink_tool_remote_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'ss' to get connections

    [root@salt-master ~]# ss -ant
    State      Recv-Q Send-Q      Local Address:Port        Peer Address:Port
    LISTEN     0      511                     *:80                     *:*
    LISTEN     0      128                     *:22                     *:*
    ESTAB      0      0               127.0.0.1:56726          127.0.0.1:4505

    Returns ``None`` when the ss tool is unavailable or its output looks
    unusable, so the caller can fall back to other mechanisms.
    '''
    remotes = set()
    valid = False
    try:
        data = subprocess.check_output(['ss', '-ant'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed ss')
        raise
    except OSError:     # not command "No such file or directory"
        return None

    for line in salt.utils.stringutils.to_str(data).split('\n'):
        if 'Address:Port' in line:
            # header line proves the ss output is in the expected shape
            valid = True
            continue
        if 'ESTAB' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[3].split(':', 1)
        remote_host, remote_port = chunks[4].split(':', 1)
        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)

    if valid is False:
        remotes = None
    return remotes
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
def _freebsd_remotes_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'sockstat' (FreeBSD)
    to get connections

    $ sudo sockstat -4
    USER    COMMAND     PID     FD  PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1456    29  tcp4   *:4505           *:*
    root    python2.7   1445    17  tcp4   *:4506           *:*
    root    python2.7   1294    14  tcp4   127.0.0.1:11813  127.0.0.1:4505
    root    python2.7   1294    41  tcp4   127.0.0.1:61115  127.0.0.1:4506

    $ sudo sockstat -4 -c -p 4506
    USER    COMMAND     PID     FD  PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1294    41  tcp4   127.0.0.1:61115  127.0.0.1:4506
    '''
    port = int(port)
    remotes = set()

    try:
        cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port))
        data = subprocess.check_output(cmd)  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError as ex:
        log.error('Failed "sockstat" with returncode = %s', ex.returncode)
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')

    for line in lines:
        chunks = line.split()
        if not chunks:
            continue
        # ['root', 'python2.7', '1456', '37', 'tcp4',
        #  '127.0.0.1:4505-', '127.0.0.1:55703']
        if 'COMMAND' in chunks[1]:
            continue  # ignore header
        if len(chunks) < 2:
            continue
        # sockstat -4 -c -p 4506 does this with high PIDs:
        # USER     COMMAND    PID   FD PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
        # salt-master python2.781106 35 tcp4  192.168.12.34:4506 192.168.12.45:60143
        local = chunks[-2]
        remote = chunks[-1]
        lhost, lport = local.split(':')
        rhost, rport = remote.split(':')
        # fixed: the caller (_remotes_on) passes 'local_port'/'remote_port',
        # but this helper used to compare which_end against 'local'/'remote',
        # so the port filter below never matched
        if which_end == 'local_port' and int(lport) != port:
            # ignore if local port not port
            continue
        if which_end == 'remote_port' and int(rport) != port:
            # ignore if remote port not port
            continue

        remotes.add(rhost)

    return remotes
def _netbsd_remotes_on(port, which_end):
    '''
    Returns set of ipv4 host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'sockstat' (NetBSD)
    to get connections

    $ sudo sockstat -4 -n
    USER    COMMAND     PID     FD  PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1456    29  tcp    *.4505           *.*
    root    python2.7   1445    17  tcp    *.4506           *.*
    root    python2.7   1294    14  tcp    127.0.0.1.11813  127.0.0.1.4505
    root    python2.7   1294    41  tcp    127.0.0.1.61115  127.0.0.1.4506

    $ sudo sockstat -4 -c -n -p 4506
    USER    COMMAND     PID     FD  PROTO  LOCAL ADDRESS    FOREIGN ADDRESS
    root    python2.7   1294    41  tcp    127.0.0.1.61115  127.0.0.1.4506
    '''
    port = int(port)
    remotes = set()

    try:
        cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port))
        data = subprocess.check_output(cmd)  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError as ex:
        log.error('Failed "sockstat" with returncode = %s', ex.returncode)
        raise

    lines = salt.utils.stringutils.to_str(data).split('\n')

    for line in lines:
        chunks = line.split()
        if not chunks:
            continue
        # ['root', 'python2.7', '1456', '37', 'tcp',
        #  '127.0.0.1.4505-', '127.0.0.1.55703']
        if 'COMMAND' in chunks[1]:
            continue  # ignore header
        if len(chunks) < 2:
            continue
        # NetBSD sockstat separates address and port with a '.'
        local = chunks[5].split('.')
        lport = local.pop()
        lhost = '.'.join(local)
        remote = chunks[6].split('.')
        rport = remote.pop()
        rhost = '.'.join(remote)
        # fixed: the caller (_remotes_on) passes 'local_port'/'remote_port',
        # but this helper used to compare which_end against 'local'/'remote',
        # so the port filter below never matched
        if which_end == 'local_port' and int(lport) != port:
            # ignore if local port not port
            continue
        if which_end == 'remote_port' and int(rport) != port:
            # ignore if remote port not port
            continue

        remotes.add(rhost)

    return remotes
def _openbsd_remotes_on(port, which_end):
    '''
    OpenBSD specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    $ netstat -nf inet
    Active Internet connections
    Proto   Recv-Q Send-Q  Local Address          Foreign Address        (state)
    tcp          0      0  10.0.0.101.4505        10.0.0.1.45329         ESTABLISHED
    tcp          0      0  10.0.0.101.4505        10.0.0.100.50798       ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-nf', 'inet'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    # fixed: check_output returns bytes on Python 3; decode before splitting
    # (every sibling helper already does this via to_str)
    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        # addresses look like 10.0.0.101.4505: the last dotted component
        # is the port
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)
        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)

    return remotes
def _linux_remotes_on(port, which_end):
    '''
    Linux specific helper function.
    Returns set of ip host addresses of remote established connections
    on local tcp port port.

    Parses output of shell 'lsof' to get connections

    $ sudo lsof -iTCP:4505 -n
    COMMAND   PID USER   FD   TYPE             DEVICE SIZE/OFF NODE NAME
    Python   9971 root   35u  IPv4 0x18a8464a29ca329d      0t0  TCP *:4505 (LISTEN)
    Python   9971 root   37u  IPv4 0x18a8464a29b2b29d      0t0  TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED)
    Python  10152 root   22u  IPv4 0x18a8464a29c8cab5      0t0  TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED)
    Python  10153 root   22u  IPv4 0x18a8464a29c8cab5      0t0  TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED)
    '''
    remotes = set()

    try:
        data = subprocess.check_output(
            ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P']  # pylint: disable=minimum-python-version
        )
    except subprocess.CalledProcessError as ex:
        if ex.returncode == 1:
            # Lsof returns 1 when nothing matched (among other conditions);
            # an empty result is not an error for our purposes.
            log.warning('"lsof" returncode = 1, likely no active TCP sessions.')
            return remotes
        log.error('Failed "lsof" with returncode = %s', ex.returncode)
        raise

    for line in salt.utils.stringutils.to_str(data).split('\n'):
        chunks = line.split()
        if not chunks:
            continue
        # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0',
        #  'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)']
        if 'COMMAND' in chunks[0]:
            continue  # ignore header
        if 'ESTABLISHED' not in chunks[-1]:
            continue  # ignore if not ESTABLISHED
        # '127.0.0.1:4505->127.0.0.1:55703'
        local, remote = chunks[8].split('->')
        _, lport = local.rsplit(':', 1)
        rhost, rport = remote.rsplit(':', 1)
        if which_end == 'remote_port' and int(rport) != port:
            continue
        if which_end == 'local_port' and int(lport) != port:
            continue
        # lsof wraps IPv6 addresses in brackets; strip them
        remotes.add(rhost.strip("[]"))

    return remotes
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
is_ipv6_filter
python
def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types)
Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L432-L446
[ "def _is_ipv(ip, version, options=None):\n\n if not version:\n version = 4\n\n if version not in (4, 6):\n return None\n\n try:\n ip_obj = ipaddress.ip_address(ip)\n except ValueError:\n # maybe it is an IP network\n try:\n ip_obj = ipaddress.ip_interface(ip)\n except ValueError:\n # nope, still not :(\n return None\n\n if not ip_obj.version == version:\n return None\n\n # has the right version, let's move on\n return _ip_options(ip_obj, version, options=options)\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 
0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 
'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else 
'{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
    '''
    return _ip_addrs(interface, include_loopback, interface_data, 'inet6')


def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>
    (i.e. the bytes are in host order and must be reversed).
    '''
    if len(hex_ip) == 32:  # ipv6
        ip = []
        for i in range(0, 32, 8):
            # each 8-hex-digit chunk is one 32-bit word of the address
            ip_part = hex_ip[i:i + 8]
            ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)]
            if invert:
                ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part))
            else:
                ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part))
        try:
            address = ipaddress.IPv6Address(":".join(ip))
            if address.ipv4_mapped:
                return str(address.ipv4_mapped)
            else:
                return address.compressed
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip

    try:
        hip = int(hex_ip, 16)
    except ValueError:
        return hex_ip
    if invert:
        return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255,
                                        hip >> 16 & 255,
                                        hip >> 8 & 255,
                                        hip & 255)
    return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255,
                                    hip >> 16 & 255,
                                    hip >> 8 & 255,
                                    hip & 255)


def mac2eui64(mac, prefix=None):
    '''
    Convert a MAC address to a EUI64 identifier
    or, with prefix provided, a full IPv6 address
    '''
    # http://tools.ietf.org/html/rfc4291#section-2.5.1
    eui64 = re.sub(r'[.:-]', '', mac).lower()
    eui64 = eui64[0:6] + 'fffe' + eui64[6:]
    # flip the universal/local bit of the first octet
    eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]

    if prefix is None:
        return ':'.join(re.findall(r'.{4}', eui64))
    else:
        try:
            net = ipaddress.ip_network(prefix, strict=False)
            euil = int('0x{0}'.format(eui64), 16)
            return '{0}/{1}'.format(net[euil], net.prefixlen)
        except Exception:  # NOTE(review): deliberately broad; returns None
            return


def active_tcp():
    '''
    Return a dict describing all active tcp connections as quickly as possible
    '''
    ret = {}
    for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
        if os.path.isfile(statf):
            with salt.utils.files.fopen(statf, 'rb') as fp_:
                for line in fp_:
                    line = salt.utils.stringutils.to_unicode(line)
                    # skip the header line starting with 'sl'
                    if line.strip().startswith('sl'):
                        continue
                    iret = _parse_tcp_line(line)
                    sl = next(iter(iret))
                    if iret[sl]['state'] == 1:  # 1 is ESTABLISHED
                        del iret[sl]['state']
                        ret[len(ret)] = iret[sl]
    return ret


def local_port_tcp(port):
    '''
    Return a set of remote ip addrs attached to the specified local port
    '''
    ret = _remotes_on(port, 'local_port')
    return ret


def remote_port_tcp(port):
    '''
    Return a set of ip addrs the current host is connected to on given port
    '''
    ret = _remotes_on(port, 'remote_port')
    return ret


def _remotes_on(port, which_end):
    '''
    Return a set of ip addrs active tcp connections
    '''
    port = int(port)

    # fast path: use the 'ss' tool when available
    ret = _netlink_tool_remote_on(port, which_end)
    if ret is not None:
        return ret

    ret = set()
    proc_available = False
    for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
        if os.path.isfile(statf):
            proc_available = True
            with salt.utils.files.fopen(statf, 'r') as fp_:
                for line in fp_:
                    line = salt.utils.stringutils.to_unicode(line)
                    if line.strip().startswith('sl'):
                        continue
                    iret = _parse_tcp_line(line)
                    sl = next(iter(iret))
                    if iret[sl][which_end] == port and iret[sl]['state'] == 1:  # 1 is ESTABLISHED
                        ret.add(iret[sl]['remote_addr'])

    if not proc_available:  # Fallback to use OS specific tools
        if salt.utils.platform.is_sunos():
            return _sunos_remotes_on(port, which_end)
        if salt.utils.platform.is_freebsd():
            return _freebsd_remotes_on(port, which_end)
        if salt.utils.platform.is_netbsd():
            return _netbsd_remotes_on(port, which_end)
        if salt.utils.platform.is_openbsd():
            return _openbsd_remotes_on(port, which_end)
        if salt.utils.platform.is_windows():
            return _windows_remotes_on(port, which_end)
        if salt.utils.platform.is_aix():
            return _aix_remotes_on(port, which_end)
        return _linux_remotes_on(port, which_end)

    return ret


def _parse_tcp_line(line):
    '''
    Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6
    '''
    ret = {}
    comps = line.strip().split()
    sl = comps[0].rstrip(':')
    ret[sl] = {}
    l_addr, l_port = comps[1].split(':')
    r_addr, r_port = comps[2].split(':')
    ret[sl]['local_addr'] = hex2ip(l_addr, True)
    ret[sl]['local_port'] = int(l_port, 16)
    ret[sl]['remote_addr'] = hex2ip(r_addr,
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
    Common prefixes:

     - ``00:16:3E`` -- Xen
     - ``00:18:51`` -- OpenVZ
     - ``00:50:56`` -- VMware (manually generated)
     - ``52:54:00`` -- QEMU/KVM
     - ``AC:DE:48`` -- PRIVATE

    References:

     - http://standards.ieee.org/develop/regauth/oui/oui.txt
     - https://www.wireshark.org/tools/oui-lookup.html
     - https://en.wikipedia.org/wiki/MAC_address
    '''
    return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
                                                random.randint(0, 0xff),
                                                random.randint(0, 0xff),
                                                random.randint(0, 0xff))


@jinja_filter('mac_str_to_bytes')
def mac_str_to_bytes(mac_str):
    '''
    Convert a MAC address string into bytes. Works with or without separators:

    b1 = mac_str_to_bytes('08:00:27:13:69:77')
    b2 = mac_str_to_bytes('080027136977')
    assert b1 == b2
    assert isinstance(b1, bytes)
    '''
    if len(mac_str) == 12:
        pass
    elif len(mac_str) == 17:
        # whatever char sits at index 2 is the separator (':' or '-')
        sep = mac_str[2]
        mac_str = mac_str.replace(sep, '')
    else:
        raise ValueError('Invalid MAC address')
    chars = (int(mac_str[s:s + 2], 16) for s in range(0, 12, 2))
    return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars)


def refresh_dns():
    '''
    issue #21397: force glibc to re-read resolv.conf
    '''
    try:
        res_init()
    except NameError:
        # Exception raised loading the library, thus res_init is not defined
        pass


@jinja_filter('connection_check')
def connection_check(addr, port=80, safe=False, ipv6=None):
    '''
    Provides a convenient alias for the dns_check filter.
    '''
    return dns_check(addr, port, safe, ipv6)


@jinja_filter('dns_check')
def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True):
    '''
    Return the ip resolved by dns, but do not exit on failure, only raise an
    exception. Obeys system preference for IPv4/6 address resolution - this
    can be overridden by the ipv6 flag.
    Tries to connect to the address before considering it useful. If no
    address can be reached, the first one resolved is used as a fallback.
    '''
    error = False
    lookup = addr
    seen_ipv6 = False  # NOTE(review): assigned but never used
    family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC
    hostnames = []

    try:
        refresh_dns()
        hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM)
    except TypeError:
        err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup)
        raise SaltSystemExit(code=42, msg=err)
    except socket.error:
        error = True

    # If ipv6 is set to True, attempt another lookup using the IPv4 family,
    # just in case we're attempting to lookup an IPv4 IP
    # as an IPv6 hostname.
    if error and ipv6:
        try:
            refresh_dns()
            hostnames = socket.getaddrinfo(addr, port,
                                           socket.AF_INET,
                                           socket.SOCK_STREAM)
        except TypeError:
            err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup)
            raise SaltSystemExit(code=42, msg=err)
        except socket.error:
            error = True

    try:
        if not hostnames:
            error = True
        else:
            resolved = False
            candidates = []
            for h in hostnames:
                # Input is IP address, passed through unchanged, just return it
                if h[4][0] == addr:
                    resolved = salt.utils.zeromq.ip_bracket(addr)
                    break

                candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0])

                # sometimes /etc/hosts contains ::1 localhost
                if not ipv6 and candidate_addr == '[::1]':
                    continue

                candidates.append(candidate_addr)

                if attempt_connect:
                    try:
                        # probe with a short timeout; first reachable wins
                        s = socket.socket(h[0], socket.SOCK_STREAM)
                        s.settimeout(2)
                        s.connect((candidate_addr.strip('[]'), h[4][1]))
                        s.close()

                        resolved = candidate_addr
                        break
                    except socket.error:
                        pass
            if not resolved:
                if candidates:
                    # nothing reachable: fall back to the first resolution
                    resolved = candidates[0]
                else:
                    error = True
    except TypeError:
        err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup)
        raise SaltSystemExit(code=42, msg=err)
    except socket.error:
        error = True

    if error:
        err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr)
        if safe:
            if salt.log.is_console_configured():
                # If logging is not configured it also means that either
                # the master or minion instance calling this hasn't even
                # started running
                log.error(err)
            raise SaltClientError()
        raise SaltSystemExit(code=42, msg=err)

    return resolved


def parse_host_port(host_port):
    """
    Takes a string argument specifying host or host:port.

    Returns a (hostname, port) or (ip_address, port) tuple. If no port is given,
    the second (port) element of the returned tuple will be None.

    host:port argument, for example, is accepted in the forms of:
      - hostname
      - hostname:1234
      - hostname.domain.tld
      - hostname.domain.tld:5678
      - [1234::5]:5678
      - 1234::5
      - 10.11.12.13:4567
      - 10.11.12.13
    """
    host, port = None, None  # default

    _s_ = host_port[:]
    if _s_[0] == "[":
        # bracketed IPv6 literal, optionally followed by ':port'
        if "]" in host_port:
            host, _s_ = _s_.lstrip("[").rsplit("]", 1)
            host = ipaddress.IPv6Address(host).compressed
            if _s_[0] == ":":
                port = int(_s_.lstrip(":"))
            else:
                if len(_s_) > 1:
                    raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port))
    else:
        if _s_.count(":") == 1:
            host, _hostport_separator_, port = _s_.partition(":")
            try:
                port = int(port)
            except ValueError as _e_:
                log.error('host_port "%s" port value "%s" is not an integer.', host_port, port)
                raise _e_
        else:
            # zero colons -> bare hostname/IPv4; >1 colon -> bare IPv6
            host = _s_
    try:
        if not isinstance(host, ipaddress._BaseAddress):
            host_ip = ipaddress.ip_address(host).compressed
            host = host_ip
    except ValueError:
        log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
        if host != sanitize_host(host):
            log.error('bad hostname: "%s"', host)
            raise ValueError('bad hostname: "{}"'.format(host))

    return host, port


def is_fqdn(hostname):
    """
    Verify if hostname conforms to be a FQDN.

    :param hostname: text string with the name of the host
    :return: bool, True if hostname is correct FQDN, False otherwise
    """
    # NOTE(review): the pattern deliberately allows '_' in labels
    compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
    return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
ipaddr
python
def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. ''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj
Filters and returns only valid IP objects.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L484-L496
null
# -*- coding: utf-8 -*-
'''
Define some generic socket functions for network modules
'''

# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import itertools
import os
import re
import types
import socket
import logging
import platform
import random
import subprocess
from string import ascii_letters, digits

# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range  # pylint: disable=import-error,redefined-builtin

# Attempt to import wmi (only present on Windows minions)
try:
    import wmi
    import salt.utils.winapi
except ImportError:
    pass

# Import salt libs
# NOTE(review): some of these imports (e.g. `random`, `salt.utils.args`,
# `salt.utils.zeromq`, `SaltClientError`, `SaltSystemExit`) are not used in
# this chunk; they may be used elsewhere in the file -- confirm before pruning.
import salt.utils.args
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.zeromq
from salt._compat import ipaddress
from salt.exceptions import SaltClientError, SaltSystemExit
from salt.utils.decorators.jinja import jinja_filter
from salt.utils.versions import LooseVersion

# inet_pton does not exist in Windows, this is a workaround
if salt.utils.platform.is_windows():
    from salt.ext import win_inet_pton  # pylint: disable=unused-import

log = logging.getLogger(__name__)

# Grab libc's res_init() so DNS resolver state can be refreshed; any failure
# (no libc, no symbol) is tolerated and simply leaves `res_init` undefined.
try:
    import ctypes
    import ctypes.util
    libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    res_init = libc.__res_init
except (ImportError, OSError, AttributeError, TypeError):
    pass

# pylint: disable=C0103


def sanitize_host(host):
    '''
    Sanitize host string.

    Keeps only the characters a hostname may contain per
    https://tools.ietf.org/html/rfc1123#section-2.1 and truncates to 255 chars.
    '''
    RFC952_characters = ascii_letters + digits + ".-"
    return "".join([c for c in host[0:255] if c in RFC952_characters])


def isportopen(host, port):
    '''
    Return status of a port

    Returns ``False`` for an out-of-range port; otherwise returns the
    ``connect_ex()`` result: 0 when the connection succeeded (port open),
    an errno value otherwise.
    '''
    if not 1 <= int(port) <= 65535:
        return False

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    out = sock.connect_ex((sanitize_host(host), int(port)))

    return out


def host_to_ips(host):
    '''
    Returns a list of IP addresses of a given hostname or None if not found.
    '''
    ips = []
    try:
        for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
                host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM):
            if family == socket.AF_INET:
                ip, port = sockaddr
            elif family == socket.AF_INET6:
                ip, port, flow_info, scope_id = sockaddr
            ips.append(ip)
        if not ips:
            ips = None
    # Any resolution failure is reported as "not found" (None)
    except Exception:
        ips = None
    return ips


def _generate_minion_id():
    '''
    Get list of possible host names and convention names.

    :return: DistinctList of candidate names (network names, aliases from
             hosts files, and non-loopback IP addresses)
    '''
    # There are three types of hostnames:
    # 1. Network names. How host is accessed from the network.
    # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts)
    # 3. Convention names, an internal nodename.

    class DistinctList(list):
        '''
        List, which allows one to append only distinct objects.
        Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version.
        Override 'filter()' for custom filtering.
        '''
        # Patterns of names/addresses that are always rejected (loopback and
        # other non-routable identities)
        localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d',
                              r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*',
                              r'fe02::.*', r'1.0.0.*.ip6.arpa']

        def append(self, p_object):
            # Only keep truthy, unseen, non-localhost entries
            if p_object and p_object not in self and not self.filter(p_object):
                super(DistinctList, self).append(p_object)
            return self

        def extend(self, iterable):
            for obj in iterable:
                self.append(obj)
            return self

        def filter(self, element):
            'Returns True if element needs to be filtered'
            for rgx in self.localhost_matchers:
                if re.match(rgx, element):
                    return True

        def first(self):
            # First element, or None when the list is empty
            return self and self[0] or None

    hostname = socket.gethostname()

    hosts = DistinctList().append(
        salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname)))
    ).append(platform.node()).append(hostname)
    if not hosts:
        # Everything so far was loopback-ish; ask the resolver for a
        # canonical name instead
        try:
            for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost',
                                            None,
                                            socket.AF_INET,
                                            socket.SOCK_RAW,
                                            socket.IPPROTO_IP,
                                            socket.AI_CANONNAME):
                if len(a_nfo) > 3:
                    hosts.append(a_nfo[3])
        except socket.gaierror:
            log.warning('Cannot resolve address %s info via socket: %s',
                        hosts.first() or 'localhost (N/A)', socket.gaierror)
    # Universal method for everywhere (Linux, Slowlaris, Windows etc)
    for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts',
                   r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))):
        try:
            with salt.utils.files.fopen(f_name) as f_hdl:
                for line in f_hdl:
                    line = salt.utils.stringutils.to_unicode(line)
                    hst = line.strip().split('#')[0].strip().split()
                    if hst:
                        # Take aliases from loopback lines and bare one-token
                        # lines (e.g. /etc/hostname content)
                        if hst[0][:4] in ('127.', '::1') or len(hst) == 1:
                            hosts.extend(hst)
        except IOError:
            pass

    # include public and private ipaddresses
    # (DistinctList.extend returns self, so this returns the full list)
    return hosts.extend([addr for addr in ip_addrs()
                         if not ipaddress.ip_address(addr).is_loopback])
def generate_minion_id():
    '''
    Return only first element of the hostname from all possible list.

    :return: the first candidate name, or 'localhost' as a last resort
    '''
    try:
        ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first())
    # first() returns None for an empty list; to_unicode(None) raises TypeError
    except TypeError:
        ret = None
    return ret or 'localhost'


def get_socket(addr, type=socket.SOCK_STREAM, proto=0):
    '''
    Return a socket object for the addr
    IP-version agnostic
    '''

    # ip_address() raises ValueError for invalid input, so version is
    # always 4 or 6 below and `family` is always bound
    version = ipaddress.ip_address(addr).version
    if version == 4:
        family = socket.AF_INET
    elif version == 6:
        family = socket.AF_INET6
    return socket.socket(family, type, proto)


def get_fqhostname():
    '''
    Returns the fully qualified hostname
    '''
    # try getaddrinfo()
    fqdn = None
    try:
        addrinfo = socket.getaddrinfo(
            socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
            socket.SOL_TCP, socket.AI_CANONNAME
        )
        for info in addrinfo:
            # info struct [family, socktype, proto, canonname, sockaddr]
            # On Windows `canonname` can be an empty string
            # This can cause the function to return `None`
            if len(info) > 3 and info[3]:
                fqdn = info[3]
                break
    except socket.gaierror:
        pass  # NOTE: this used to log.error() but it was later disabled
    except socket.error as err:
        log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err)
    if fqdn is None:
        # Fall back to the (possibly unqualified) stdlib answer
        fqdn = socket.getfqdn()
    return fqdn


def ip_to_host(ip):
    '''
    Returns the hostname of a given IP, or None when reverse lookup fails
    '''
    try:
        hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip)
    # Broad catch: any resolver failure is treated as "unknown host"
    except Exception as exc:
        log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc)
        hostname = None
    return hostname

# pylint: enable=C0103
def is_reachable_host(entity_name):
    '''
    Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc).
    :param hostname:
    :return:
    '''
    try:
        # getaddrinfo raises gaierror when the name cannot be resolved
        resolved = socket.getaddrinfo(entity_name, 0, 0, 0, 0)
        assert type(resolved) == list
        return True
    except socket.gaierror:
        return False


def is_ip(ip):
    '''
    Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
    '''
    return is_ipv4(ip) or is_ipv6(ip)


def is_ipv4(ip):
    '''
    Returns a bool telling if the value passed to it was a valid IPv4 address
    '''
    try:
        parsed = ipaddress.ip_address(ip)
    except ValueError:
        return False
    return parsed.version == 4


def is_ipv6(ip):
    '''
    Returns a bool telling if the value passed to it was a valid IPv6 address
    '''
    try:
        parsed = ipaddress.ip_address(ip)
    except ValueError:
        return False
    return parsed.version == 6


def is_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv4 or IPv6 subnet
    '''
    return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr)


def is_ipv4_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv4 subnet
    '''
    try:
        # require an explicit prefix, then let IPv4Network validate the rest
        return '/' in cidr and bool(ipaddress.IPv4Network(cidr))
    except Exception:
        return False


def is_ipv6_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv6 subnet
    '''
    try:
        # require an explicit prefix, then let IPv6Network validate the rest
        return '/' in cidr and bool(ipaddress.IPv6Network(cidr))
    except Exception:
        return False


@jinja_filter('is_ip')
def is_ip_filter(ip, options=None):
    '''
    Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
    '''
    return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options)
def _ip_options_global(ip_obj, version):
    # "global" is modeled as "not private"
    return not ip_obj.is_private


def _ip_options_multicast(ip_obj, version):
    return ip_obj.is_multicast


def _ip_options_loopback(ip_obj, version):
    return ip_obj.is_loopback


def _ip_options_link_local(ip_obj, version):
    return ip_obj.is_link_local


def _ip_options_private(ip_obj, version):
    return ip_obj.is_private


def _ip_options_reserved(ip_obj, version):
    return ip_obj.is_reserved


def _ip_options_site_local(ip_obj, version):
    # site-local only exists for IPv6
    if version == 6:
        return ip_obj.is_site_local
    return False


def _ip_options_unspecified(ip_obj, version):
    return ip_obj.is_unspecified


def _ip_options(ip_obj, version, options=None):
    '''
    Apply a CSV list of option checks to an already-parsed IP object.
    Returns the text form of the address when every requested check passes,
    otherwise None. Unknown option names are silently ignored.
    '''
    options_fun_map = {
        'global': _ip_options_global,
        'link-local': _ip_options_link_local,
        'linklocal': _ip_options_link_local,
        'll': _ip_options_link_local,
        'link_local': _ip_options_link_local,
        'loopback': _ip_options_loopback,
        'lo': _ip_options_loopback,
        'multicast': _ip_options_multicast,
        'private': _ip_options_private,
        'public': _ip_options_global,
        'reserved': _ip_options_reserved,
        'site-local': _ip_options_site_local,
        'sl': _ip_options_site_local,
        'site_local': _ip_options_site_local,
        'unspecified': _ip_options_unspecified
    }

    if not options:
        # IP version already checked by the caller
        return six.text_type(ip_obj)

    requested = [option.strip() for option in options.split(',')]
    for option in requested:
        check = options_fun_map.get(option)
        # stop at the first failed test; unknown options are skipped
        if check is not None and not check(ip_obj, version):
            return None
    return six.text_type(ip_obj)


def _is_ipv(ip, version, options=None):
    '''
    Validate `ip` as an address (or interface/network) of the given IP
    version and run the optional option checks; returns the text form of
    the address or None.
    '''
    if not version:
        version = 4
    if version not in (4, 6):
        return None

    try:
        ip_obj = ipaddress.ip_address(ip)
    except ValueError:
        # maybe it is an IP network (address with prefix)
        try:
            ip_obj = ipaddress.ip_interface(ip)
        except ValueError:
            # nope, still not :(
            return None

    if not ip_obj.version == version:
        return None

    # has the right version, hand off to the option checks
    return _ip_options(ip_obj, version, options=options)
@jinja_filter('is_ipv4')
def is_ipv4_filter(ip, options=None):
    '''
    Returns a bool telling if the value passed to it was a valid IPv4 address.

    ip
        The IP address.

    net: False
        Consider IP addresses followed by netmask.

    options
        CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
    '''
    # _is_ipv returns the textual address on success, None on failure
    checked = _is_ipv(ip, 4, options=options)
    return isinstance(checked, six.string_types)


@jinja_filter('is_ipv6')
def is_ipv6_filter(ip, options=None):
    '''
    Returns a bool telling if the value passed to it was a valid IPv6 address.

    ip
        The IP address.

    net: False
        Consider IP addresses followed by netmask.

    options
        CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
    '''
    checked = _is_ipv(ip, 6, options=options)
    return isinstance(checked, six.string_types)


def _ipv_filter(value, version, options=None):
    '''
    Validate a single value, or filter a collection, down to addresses of
    the requested IP version.
    '''
    if version not in (4, 6):
        return

    if isinstance(value, (six.string_types, six.text_type, six.binary_type)):
        # a single value: validate it directly
        return _is_ipv(value, version, options=options)
    if isinstance(value, (list, tuple, types.GeneratorType)):
        # a collection: keep only entries of the desired IP version
        kept = []
        for entry in value:
            checked = _is_ipv(entry, version, options=options)
            if checked is not None:
                kept.append(checked)
        return kept
    return None


@jinja_filter('ipv4')
def ipv4(value, options=None):
    '''
    Filters a list and returns IPv4 values only.
    '''
    return _ipv_filter(value, 4, options=options)


@jinja_filter('ipv6')
def ipv6(value, options=None):
    '''
    Filters a list and returns IPv6 values only.
    '''
    return _ipv_filter(value, 6, options=options)
@jinja_filter('ipaddr')  # extend lists
def _filter_ipaddr(value, options, version=None):
    # Normalize `value` to a list of validated addresses of the requested
    # version (or of either version when `version` is falsy).
    # NOTE(review): the else-branch calls `ipaddr(...)`, which is not defined
    # in this chunk -- presumably a public `ipaddr()` function exists
    # elsewhere in the file (the decorator name suggests it); confirm upstream.
    ipaddr_filter_out = None
    if version:
        if version == 4:
            ipaddr_filter_out = ipv4(value, options)
        elif version == 6:
            ipaddr_filter_out = ipv6(value, options)
    else:
        ipaddr_filter_out = ipaddr(value, options)
    if not ipaddr_filter_out:
        return
    if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)):
        # single value -> one-element list so callers can always iterate
        ipaddr_filter_out = [ipaddr_filter_out]
    return ipaddr_filter_out


@jinja_filter('ip_host')
def ip_host(value, options=None, version=None):
    '''
    Returns the interfaces IP address, e.g.: 192.168.0.1/28.
    '''
    ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
    if not ipaddr_filter_out:
        return
    # Scalar input gets a scalar result; collection input gets a list
    if not isinstance(value, (list, tuple, types.GeneratorType)):
        return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0]))
    return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out]


def _network_hosts(ip_addr_entry):
    # All usable host addresses inside the given network, as text
    return [
        six.text_type(host)
        for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts()
    ]


@jinja_filter('network_hosts')
def network_hosts(value, options=None, version=None):
    '''
    Return the list of hosts within a network.

    .. note::

        When running this command with a large IPv6 network, the command will
        take a long time to gather all of the hosts.
    '''
    ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
    if not ipaddr_filter_out:
        return
    if not isinstance(value, (list, tuple, types.GeneratorType)):
        return _network_hosts(ipaddr_filter_out[0])
    return [
        _network_hosts(ip_a)
        for ip_a in ipaddr_filter_out
    ]


def _network_size(ip_addr_entry):
    # Total number of addresses in the network (including net/broadcast)
    return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses


@jinja_filter('network_size')
def network_size(value, options=None, version=None):
    '''
    Get the size of a network.
    '''
    ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version)
    if not ipaddr_filter_out:
        return
    if not isinstance(value, (list, tuple, types.GeneratorType)):
        return _network_size(ipaddr_filter_out[0])
    return [
        _network_size(ip_a)
        for ip_a in ipaddr_filter_out
    ]
def natural_ipv4_netmask(ip, fmt='prefixlen'):
    '''
    Returns the "natural" (classful) mask of an IPv4 address, either as a
    '/NN' prefix (default) or as a dotted netmask when fmt='netmask'.
    '''
    bits = _ipv4_to_bits(ip)

    # Classful rules: leading '11' -> class C (/24), '1' -> class B (/16),
    # otherwise class A (/8)
    if bits.startswith('11'):
        mask = '24'
    elif bits.startswith('1'):
        mask = '16'
    else:
        mask = '8'

    return cidr_to_ipv4_netmask(mask) if fmt == 'netmask' else '/' + mask


def rpad_ipv4_network(ip):
    '''
    Returns an IP network address padded with zeros.

    Ex: '192.168.3' -> '192.168.3.0'
        '10.209' -> '10.209.0.0'
    '''
    # Pad the dotted-quad out to four octets with '0's, then take the first 4
    return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4))


def cidr_to_ipv4_netmask(cidr_bits):
    '''
    Returns an IPv4 netmask for a prefix length (1-32); '' for invalid input.
    '''
    try:
        remaining = int(cidr_bits)
    except ValueError:
        return ''
    if not 1 <= remaining <= 32:
        return ''

    octets = []
    for _ in range(4):
        if remaining >= 8:
            octets.append('255')
            remaining -= 8
        else:
            # partial octet: top `remaining` bits set
            octets.append('{0:d}'.format(256 - (2 ** (8 - remaining))))
            remaining = 0
    return '.'.join(octets)


def _number_of_set_bits_to_ipv4_netmask(set_bits):  # pylint: disable=C0103
    '''
    Returns an IPv4 netmask from the integer representation of that mask.

    Ex. 0xffffff00 -> '255.255.255.0'
    '''
    return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))


# pylint: disable=C0103
def _number_of_set_bits(x):
    '''
    Returns the number of bits that are set in a 32bit int
    '''
    # SWAR popcount, taken from http://stackoverflow.com/a/4912729. Many thanks!
    x -= (x >> 1) & 0x55555555
    x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
    x = ((x >> 4) + x) & 0x0f0f0f0f
    x += x >> 8
    x += x >> 16
    return x & 0x0000003f
def _interfaces_ip(out):
    '''
    Uses ip to return a dictionary of interfaces with various information about
    each (up/down state, ip address, netmask, and hwaddr)
    '''
    ret = dict()

    def parse_network(value, cols):
        '''
        Return a tuple of ip, netmask, broadcast
        based on the current set of cols

        Relies on `type_` from the enclosing loop scope to decide whether
        the address is inet or inet6.
        '''
        brd = None
        scope = None
        if '/' in value:  # we have a CIDR in this address
            ip, cidr = value.split('/')  # pylint: disable=C0103
        else:
            ip = value  # pylint: disable=C0103
            cidr = 32

        if type_ == 'inet':
            mask = cidr_to_ipv4_netmask(int(cidr))
            if 'brd' in cols:
                brd = cols[cols.index('brd') + 1]
        elif type_ == 'inet6':
            mask = cidr
            if 'scope' in cols:
                scope = cols[cols.index('scope') + 1]
        return (ip, mask, brd, scope)

    # Each interface stanza in `ip addr show` output starts at column 0 with
    # a digit ("1: lo: <...>"); split the output into those stanzas
    groups = re.compile('\r?\n\\d').split(out)
    for group in groups:
        iface = None
        data = dict()

        for line in group.splitlines():
            if ' ' not in line:
                continue
            # Header line: index, name (optionally @parent) and <FLAGS>
            match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
            if match:
                iface, parent, attrs = match.groups()
                if 'UP' in attrs.split(','):
                    data['up'] = True
                else:
                    data['up'] = False
                if parent:
                    data['parent'] = parent
                continue

            cols = line.split()
            if len(cols) >= 2:
                type_, value = tuple(cols[0:2])
                iflabel = cols[-1:][0]
                if type_ in ('inet', 'inet6'):
                    if 'secondary' not in cols:
                        ipaddr, netmask, broadcast, scope = parse_network(value, cols)
                        if type_ == 'inet':
                            if 'inet' not in data:
                                data['inet'] = list()
                            addr_obj = dict()
                            addr_obj['address'] = ipaddr
                            addr_obj['netmask'] = netmask
                            addr_obj['broadcast'] = broadcast
                            addr_obj['label'] = iflabel
                            data['inet'].append(addr_obj)
                        elif type_ == 'inet6':
                            if 'inet6' not in data:
                                data['inet6'] = list()
                            addr_obj = dict()
                            addr_obj['address'] = ipaddr
                            addr_obj['prefixlen'] = netmask
                            addr_obj['scope'] = scope
                            data['inet6'].append(addr_obj)
                    else:
                        # secondary (alias) addresses are collected separately
                        if 'secondary' not in data:
                            data['secondary'] = list()
                        ip_, mask, brd, scp = parse_network(value, cols)
                        data['secondary'].append({
                            'type': type_,
                            'address': ip_,
                            'netmask': mask,
                            'broadcast': brd,
                            'label': iflabel,
                        })
                        del ip_, mask, brd, scp
                elif type_.startswith('link'):
                    # e.g. "link/ether aa:bb:..." -> hardware address
                    data['hwaddr'] = value
        if iface:
            ret[iface] = data
            del iface, data
    return ret


def _interfaces_ifconfig(out):
    '''
    Uses ifconfig to return a dictionary of interfaces with various information
    about each (up/down state, ip address, netmask, and hwaddr)
    '''
    ret = dict()

    piface = re.compile(r'^([^\s:]+)')
    pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)')
    # SunOS ifconfig output differs from the Linux/BSD flavors
    if salt.utils.platform.is_sunos():
        pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)')
        pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)')
        pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*')
    else:
        pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s')
        pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
        pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?')
    pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))')
    pupdown = re.compile('UP')
    pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)')

    # One stanza per interface: a new stanza starts at a non-space column 0
    groups = re.compile('\r?\n(?=\\S)').split(out)
    for group in groups:
        data = dict()
        iface = ''
        updown = False
        for line in group.splitlines():
            miface = piface.match(line)
            mmac = pmac.match(line)
            mip = pip.match(line)
            mip6 = pip6.match(line)
            mupdown = pupdown.search(line)
            if miface:
                iface = miface.group(1)
            if mmac:
                data['hwaddr'] = mmac.group(1)
                if salt.utils.platform.is_sunos():
                    # SunOS drops leading zeros from MAC octets; restore them
                    expand_mac = []
                    for chunk in data['hwaddr'].split(':'):
                        expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else '{0}'.format(chunk))
                    data['hwaddr'] = ':'.join(expand_mac)
            if mip:
                if 'inet' not in data:
                    data['inet'] = list()
                addr_obj = dict()
                addr_obj['address'] = mip.group(1)
                mmask = pmask.match(line)
                if mmask:
                    if mmask.group(1):
                        # hex netmask (e.g. 0xffffff00) -> dotted quad
                        mmask = _number_of_set_bits_to_ipv4_netmask(
                            int(mmask.group(1), 16))
                    else:
                        mmask = mmask.group(2)
                    addr_obj['netmask'] = mmask
                mbcast = pbcast.match(line)
                if mbcast:
                    addr_obj['broadcast'] = mbcast.group(1)
                data['inet'].append(addr_obj)
            if mupdown:
                updown = True
            if mip6:
                if 'inet6' not in data:
                    data['inet6'] = list()
                addr_obj = dict()
                addr_obj['address'] = mip6.group(1) or mip6.group(2)
                mmask6 = pmask6.match(line)
                if mmask6:
                    addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
                    if not salt.utils.platform.is_sunos():
                        ipv6scope = mmask6.group(3) or mmask6.group(4)
                        addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope
                # SunOS sometimes has ::/0 as inet6 addr when using addrconf
                if not salt.utils.platform.is_sunos() \
                        or addr_obj['address'] != '::' \
                        and addr_obj['prefixlen'] != 0:
                    data['inet6'].append(addr_obj)
        data['up'] = updown
        if iface in ret:
            # SunOS optimization, where interfaces occur twice in 'ifconfig -a'
            # output with the same name: for ipv4 and then for ipv6 addr family.
            # Every instance has it's own 'UP' status and we assume that ipv4
            # status determines global interface status.
            #
            # merge items with higher priority for older values
            # after that merge the inet and inet6 sub items for both
            ret[iface] = dict(list(data.items()) + list(ret[iface].items()))
            if 'inet' in data:
                ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet'])
            if 'inet6' in data:
                ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6'])
        else:
            ret[iface] = data
            del data
    return ret
def linux_interfaces():
    '''
    Obtain interface information for *NIX/BSD variants

    Prefers `ip` when available, falling back to `ifconfig`.
    '''
    ifaces = dict()
    ip_path = salt.utils.path.which('ip')
    ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig')
    if ip_path:
        # `ip link show` supplies state/hwaddr, `ip addr show` the addresses;
        # the combined output is parsed in one pass
        cmd1 = subprocess.Popen(
            '{0} link show'.format(ip_path),
            shell=True,
            close_fds=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0]
        cmd2 = subprocess.Popen(
            '{0} addr show'.format(ip_path),
            shell=True,
            close_fds=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0]
        ifaces = _interfaces_ip("{0}\n{1}".format(
            salt.utils.stringutils.to_str(cmd1),
            salt.utils.stringutils.to_str(cmd2)))
    elif ifconfig_path:
        cmd = subprocess.Popen(
            '{0} -a'.format(ifconfig_path),
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0]
        ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd))
    return ifaces


def _netbsd_interfaces_ifconfig(out):
    '''
    Uses ifconfig to return a dictionary of interfaces with various information
    about each (up/down state, ip address, netmask, and hwaddr)
    '''
    ret = dict()

    piface = re.compile(r'^([^\s:]+)')
    pmac = re.compile('.*?address: ([0-9a-f:]+)')

    # NetBSD >= 8 prints the prefix as part of the address (addr/NN)
    pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s')
    pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s')

    pupdown = re.compile('UP')
    pbcast = re.compile(r'.*?broadcast ([\d\.]+)')

    # One stanza per interface: a new stanza starts at a non-space column 0
    groups = re.compile('\r?\n(?=\\S)').split(out)
    for group in groups:
        data = dict()
        iface = ''
        updown = False
        for line in group.splitlines():
            miface = piface.match(line)
            mmac = pmac.match(line)
            mip = pip.match(line)
            mip6 = pip6.match(line)
            mupdown = pupdown.search(line)
            if miface:
                iface = miface.group(1)
            if mmac:
                data['hwaddr'] = mmac.group(1)
            if mip:
                if 'inet' not in data:
                    data['inet'] = list()
                addr_obj = dict()
                addr_obj['address'] = mip.group(1)
                # NOTE(review): `mmask` is assigned but never used below
                mmask = mip.group(2)
                if mip.group(2):
                    addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2))
                mbcast = pbcast.match(line)
                if mbcast:
                    addr_obj['broadcast'] = mbcast.group(1)
                data['inet'].append(addr_obj)
            if mupdown:
                updown = True
            if mip6:
                if 'inet6' not in data:
                    data['inet6'] = list()
                addr_obj = dict()
                addr_obj['address'] = mip6.group(1)
                # NOTE(review): `mmask6` is assigned but never used below
                mmask6 = mip6.group(3)
                addr_obj['scope'] = mip6.group(2)
                addr_obj['prefixlen'] = mip6.group(3)
                data['inet6'].append(addr_obj)
        data['up'] = updown
        ret[iface] = data
        del data
    return ret
def netbsd_interfaces():
    '''
    Obtain interface information for NetBSD >= 8 where the ifconfig
    output diverged from other BSD variants (Netmask is now part of the
    address)
    '''
    # NetBSD versions prior to 8.0 can still use linux_interfaces()
    if LooseVersion(os.uname()[2]) < LooseVersion('8.0'):
        return linux_interfaces()

    ifconfig_path = salt.utils.path.which('ifconfig')
    cmd = subprocess.Popen(
        '{0} -a'.format(ifconfig_path),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT).communicate()[0]
    return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd))


def _interfaces_ipconfig(out):
    '''
    Returns a dictionary of interfaces with various information about each
    (up/down state, ip address, netmask, and hwaddr)

    NOTE: This is not used by any function and may be able to be removed in
    the future.
    '''
    # NOTE(review): this function appears to be dead code (see docstring) and
    # has several visible defects: `ifaces` is never populated before being
    # indexed (KeyError on the first adapter line), the inet6 branch
    # initializes 'inet' but appends to 'inet6' (likely a typo), and the
    # built `ifaces` dict is never returned. Confirm before resurrecting.
    ifaces = dict()
    iface = None
    adapter_iface_regex = re.compile(r'adapter (\S.+):$')

    for line in out.splitlines():
        if not line:
            continue
        # TODO what does Windows call Infiniband and 10/40gige adapters
        if line.startswith('Ethernet'):
            iface = ifaces[adapter_iface_regex.search(line).group(1)]
            iface['up'] = True
            addr = None
            continue
        if iface:
            key, val = line.split(',', 1)
            key = key.strip(' .')
            val = val.strip()
            if addr and key == 'Subnet Mask':
                addr['netmask'] = val
            elif key in ('IP Address', 'IPv4 Address'):
                if 'inet' not in iface:
                    iface['inet'] = list()
                addr = {'address': val.rstrip('(Preferred)'),
                        'netmask': None,
                        'broadcast': None}  # TODO find the broadcast
                iface['inet'].append(addr)
            elif 'IPv6 Address' in key:
                if 'inet6' not in iface:
                    iface['inet'] = list()
                # XXX What is the prefixlen!?
                addr = {'address': val.rstrip('(Preferred)'),
                        'prefixlen': None}
                iface['inet6'].append(addr)
            elif key == 'Physical Address':
                iface['hwaddr'] = val
            elif key == 'Media State':
                # XXX seen used for tunnel adaptors
                # might be useful
                iface['up'] = (val != 'Media disconnected')
def win_interfaces():
    '''
    Obtain interface information for Windows systems via WMI
    (Win32_NetworkAdapterConfiguration).
    '''
    with salt.utils.winapi.Com():
        c = wmi.WMI()
        ifaces = {}
        for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
            ifaces[iface.Description] = dict()
            if iface.MACAddress:
                ifaces[iface.Description]['hwaddr'] = iface.MACAddress
            if iface.IPEnabled:
                ifaces[iface.Description]['up'] = True
                for ip in iface.IPAddress:
                    # '.' in the address -> IPv4 entry
                    if '.' in ip:
                        if 'inet' not in ifaces[iface.Description]:
                            ifaces[iface.Description]['inet'] = []
                        item = {'address': ip,
                                'label': iface.Description}
                        if iface.DefaultIPGateway:
                            # NOTE(review): this takes the first IPv4 default
                            # gateway and stores it under 'broadcast' -- looks
                            # questionable but is reproduced as-is; confirm.
                            broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '')
                            if broadcast:
                                item['broadcast'] = broadcast
                        if iface.IPSubnet:
                            netmask = next((i for i in iface.IPSubnet if '.' in i), '')
                            if netmask:
                                item['netmask'] = netmask
                        ifaces[iface.Description]['inet'].append(item)
                    # ':' in the address -> IPv6 entry
                    if ':' in ip:
                        if 'inet6' not in ifaces[iface.Description]:
                            ifaces[iface.Description]['inet6'] = []
                        item = {'address': ip}
                        if iface.DefaultIPGateway:
                            broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '')
                            if broadcast:
                                item['broadcast'] = broadcast
                        if iface.IPSubnet:
                            netmask = next((i for i in iface.IPSubnet if ':' in i), '')
                            if netmask:
                                item['netmask'] = netmask
                        ifaces[iface.Description]['inet6'].append(item)
            else:
                ifaces[iface.Description]['up'] = False
    return ifaces
def interfaces():
    '''
    Return a dictionary of information about all the interfaces on the minion
    '''
    # Dispatch on platform; Linux/other *NIX is the default
    if salt.utils.platform.is_windows():
        return win_interfaces()
    if salt.utils.platform.is_netbsd():
        return netbsd_interfaces()
    return linux_interfaces()


def get_net_start(ipaddr, netmask):
    '''
    Return the address of the network
    '''
    cidr = '{0}/{1}'.format(ipaddr, netmask)
    net = ipaddress.ip_network(cidr, strict=False)
    return six.text_type(net.network_address)


def get_net_size(mask):
    '''
    Turns an IPv4 netmask into it's corresponding prefix length
    (255.255.255.0 -> 24 as in 192.168.1.10/24).
    '''
    # Concatenate the 8-bit binary form of every octet, then count the
    # leading ones by stripping the trailing zeros
    bit_string = ''.join(bin(int(octet))[2:].zfill(8) for octet in mask.split('.'))
    return len(bit_string.rstrip('0'))


def calc_net(ipaddr, netmask=None):
    '''
    Takes IP (CIDR notation supported) and optionally netmask
    and returns the network in CIDR-notation.
    (The IP can be any IP inside the subnet)
    '''
    cidr = ipaddr if netmask is None else '{0}/{1}'.format(ipaddr, netmask)
    return six.text_type(ipaddress.ip_network(cidr, strict=False))
def _ipv4_to_bits(ipaddr):
    '''
    Accepts an IPv4 dotted quad and returns a string representing its binary
    counterpart
    '''
    return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')])


def _get_iface_info(iface):
    '''
    If `iface` is available, return interface info and no error, otherwise
    return no info and log and return an error
    '''
    iface_info = interfaces()

    if iface in iface_info.keys():
        return iface_info, False
    else:
        error_msg = ('Interface "{0}" not in available interfaces: "{1}"'
                     ''.format(iface, '", "'.join(iface_info.keys())))
        log.error(error_msg)
        return None, error_msg


def _hw_addr_aix(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface on AIX
    MAC address not available in through interfaces
    '''
    cmd = subprocess.Popen(
        'entstat -d {0} | grep \'Hardware Address\''.format(iface),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT).communicate()[0]

    if cmd:
        # Expected line shape: "Hardware Address: aa:bb:cc:dd:ee:ff"
        # NOTE(review): `cmd` is the raw Popen output; on Python 3 this is
        # bytes and `cmd.split(' ')` would raise TypeError -- confirm the
        # AIX path decodes output upstream.
        comps = cmd.split(' ')
        if len(comps) == 3:
            mac_addr = comps[2].strip('\'').strip()
            return mac_addr

    error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface))
    log.error(error_msg)
    return error_msg


def hw_addr(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface

    .. versionchanged:: 2016.11.4
        Added support for AIX
    '''
    if salt.utils.platform.is_aix():
        # BUGFIX: previously `return _hw_addr_aix` handed the function object
        # back to the caller instead of the MAC address string -- call it.
        return _hw_addr_aix(iface)

    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('hwaddr', '')
    else:
        return error
def interface(iface):
    '''
    Return the details of `iface` or an error if it does not exist
    '''
    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('inet', '')
    else:
        return error


def interface_ip(iface):
    '''
    Return `iface` IPv4 addr or an error if `iface` does not exist
    '''
    iface_info, error = _get_iface_info(iface)

    if error is False:
        inet = iface_info.get(iface, {}).get('inet', None)
        # first configured IPv4 address, or '' when none are present
        return inet[0].get('address', '') if inet else ''
    else:
        return error


def _subnets(proto='inet', interfaces_=None):
    '''
    Returns a list of subnets to which the host belongs

    `interfaces_` may be None (all interfaces), a list of names, or a
    single name.
    '''
    if interfaces_ is None:
        ifaces = interfaces()
    elif isinstance(interfaces_, list):
        ifaces = {}
        for key, value in six.iteritems(interfaces()):
            if key in interfaces_:
                ifaces[key] = value
    else:
        ifaces = {interfaces_: interfaces().get(interfaces_, {})}

    ret = set()

    if proto == 'inet':
        subnet = 'netmask'
        dflt_cidr = 32
    elif proto == 'inet6':
        subnet = 'prefixlen'
        dflt_cidr = 128
    else:
        log.error('Invalid proto %s calling subnets()', proto)
        return

    for ip_info in six.itervalues(ifaces):
        addrs = ip_info.get(proto, [])
        # secondary (alias) addresses of the matching family count too
        addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto])

        for intf in addrs:
            if subnet in intf:
                intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet]))
            else:
                # no mask recorded: treat as a host route (/32 or /128)
                intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr))
            if not intf.is_loopback:
                ret.add(intf.network)
    return [six.text_type(net) for net in sorted(ret)]


def subnets(interfaces=None):
    '''
    Returns a list of IPv4 subnets to which the host belongs
    '''
    return _subnets('inet', interfaces_=interfaces)


def subnets6():
    '''
    Returns a list of IPv6 subnets to which the host belongs
    '''
    return _subnets('inet6')


def in_subnet(cidr, addr=None):
    '''
    Returns True if host or (any of) addrs is within specified subnet, otherwise False
    '''
    try:
        cidr = ipaddress.ip_network(cidr)
    except ValueError:
        log.error('Invalid CIDR \'%s\'', cidr)
        return False

    if addr is None:
        # default to every address assigned to this host (v4 and v6)
        addr = ip_addrs()
        addr.extend(ip_addrs6())
    elif not isinstance(addr, (list, tuple)):
        addr = (addr,)

    return any(ipaddress.ip_address(item) in cidr for item in addr)
def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'):
    '''
    Return the full list of IP adresses matching the criteria

    proto = inet|inet6

    `interface_data` may be supplied (a dict in interfaces() shape) to
    avoid re-querying the system.
    '''
    ret = set()

    ifaces = interface_data \
        if isinstance(interface_data, dict) \
        else interfaces()
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
                              if k == interface])
        if not target_ifaces:
            log.error('Interface %s not found.', interface)
    for ip_info in six.itervalues(target_ifaces):
        addrs = ip_info.get(proto, [])
        # secondary (alias) addresses of the matching family count too
        addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto])

        for addr in addrs:
            addr = ipaddress.ip_address(addr.get('address'))
            if not addr.is_loopback or include_loopback:
                ret.add(addr)
    # sorted() works here because all entries share one address family
    return [six.text_type(addr) for addr in sorted(ret)]


def ip_addrs(interface=None, include_loopback=False, interface_data=None):
    '''
    Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
    ignored, unless 'include_loopback=True' is indicated. If 'interface' is
    provided, then only IP addresses from that interface will be returned.
    '''
    return _ip_addrs(interface, include_loopback, interface_data, 'inet')


def ip_addrs6(interface=None, include_loopback=False, interface_data=None):
    '''
    Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
    unless 'include_loopback=True' is indicated. If 'interface' is provided,
    then only IP addresses from that interface will be returned.
    '''
    return _ip_addrs(interface, include_loopback, interface_data, 'inet6')
def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>
    '''
    if len(hex_ip) == 32:  # 32 hex chars -> IPv6
        pieces = []
        for i in range(0, 32, 8):
            # split each 8-char word into its four bytes
            pairs = [hex_ip[i + x:i + x + 2] for x in range(0, 8, 2)]
            if invert:
                # /proc stores each 32-bit word little-endian
                pieces.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(pairs))
            else:
                pieces.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(pairs))
        try:
            address = ipaddress.IPv6Address(":".join(pieces))
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip
        # v4-mapped addresses come back in dotted-quad form
        return str(address.ipv4_mapped) if address.ipv4_mapped else address.compressed

    try:
        hip = int(hex_ip, 16)
    except ValueError:
        return hex_ip
    octets = [hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255]
    if invert:
        octets.reverse()
    return '{0}.{1}.{2}.{3}'.format(*octets)


def mac2eui64(mac, prefix=None):
    '''
    Convert a MAC address to a EUI64 identifier
    or, with prefix provided, a full IPv6 address
    '''
    # http://tools.ietf.org/html/rfc4291#section-2.5.1
    ident = re.sub(r'[.:-]', '', mac).lower()
    ident = ident[0:6] + 'fffe' + ident[6:]
    # flip the universal/local bit of the first byte
    ident = hex(int(ident[0:2], 16) | 2)[2:].zfill(2) + ident[2:]

    if prefix is None:
        return ':'.join(re.findall(r'.{4}', ident))
    try:
        net = ipaddress.ip_network(prefix, strict=False)
        host_id = int('0x{0}'.format(ident), 16)
        return '{0}/{1}'.format(net[host_id], net.prefixlen)
    except Exception:
        return


def active_tcp():
    '''
    Return a dict describing all active tcp connections as quickly as possible
    '''
    ret = {}
    for statf in ('/proc/net/tcp', '/proc/net/tcp6'):
        if not os.path.isfile(statf):
            continue
        with salt.utils.files.fopen(statf, 'rb') as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                if line.strip().startswith('sl'):
                    continue  # header row
                iret = _parse_tcp_line(line)
                sl = next(iter(iret))
                if iret[sl]['state'] == 1:  # 1 is ESTABLISHED
                    del iret[sl]['state']
                    ret[len(ret)] = iret[sl]
    return ret
def local_port_tcp(port):
    '''
    Return a set of remote ip addrs attached to the specified local port
    '''
    return _remotes_on(port, 'local_port')


def remote_port_tcp(port):
    '''
    Return a set of ip addrs the current host is connected to on given port
    '''
    return _remotes_on(port, 'remote_port')


def _remotes_on(port, which_end):
    '''
    Return a set of ip addrs active tcp connections
    '''
    port = int(port)

    # Prefer the 'ss' based lookup when the tool is usable
    ret = _netlink_tool_remote_on(port, which_end)
    if ret is not None:
        return ret

    ret = set()
    proc_available = False
    for statf in ('/proc/net/tcp', '/proc/net/tcp6'):
        if not os.path.isfile(statf):
            continue
        proc_available = True
        with salt.utils.files.fopen(statf, 'r') as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                if line.strip().startswith('sl'):
                    continue  # header row
                iret = _parse_tcp_line(line)
                sl = next(iter(iret))
                # only ESTABLISHED (state 1) connections on the port count
                if iret[sl][which_end] == port and iret[sl]['state'] == 1:
                    ret.add(iret[sl]['remote_addr'])

    if not proc_available:
        # No /proc available: fall back to platform specific tooling
        if salt.utils.platform.is_sunos():
            return _sunos_remotes_on(port, which_end)
        if salt.utils.platform.is_freebsd():
            return _freebsd_remotes_on(port, which_end)
        if salt.utils.platform.is_netbsd():
            return _netbsd_remotes_on(port, which_end)
        if salt.utils.platform.is_openbsd():
            return _openbsd_remotes_on(port, which_end)
        if salt.utils.platform.is_windows():
            return _windows_remotes_on(port, which_end)
        if salt.utils.platform.is_aix():
            return _aix_remotes_on(port, which_end)
        return _linux_remotes_on(port, which_end)

    return ret


def _parse_tcp_line(line):
    '''
    Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6
    '''
    fields = line.strip().split()
    slot = fields[0].rstrip(':')
    local_addr, local_port = fields[1].split(':')
    remote_addr, remote_port = fields[2].split(':')
    # addresses are little-endian hex in /proc, hence invert=True
    return {
        slot: {
            'local_addr': hex2ip(local_addr, True),
            'local_port': int(local_port, 16),
            'remote_addr': hex2ip(remote_addr, True),
            'remote_port': int(remote_port, 16),
            'state': int(fields[3], 16),
        }
    }
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
ip_host
python
def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out]
Returns the interfaces IP address, e.g.: 192.168.0.1/28.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L516-L525
[ "def _filter_ipaddr(value, options, version=None):\n ipaddr_filter_out = None\n if version:\n if version == 4:\n ipaddr_filter_out = ipv4(value, options)\n elif version == 6:\n ipaddr_filter_out = ipv6(value, options)\n else:\n ipaddr_filter_out = ipaddr(value, options)\n if not ipaddr_filter_out:\n return\n if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)):\n ipaddr_filter_out = [ipaddr_filter_out]\n return ipaddr_filter_out\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! 
def _interfaces_ip(out):
    '''
    Uses ip to return a dictionary of interfaces with various information about
    each (up/down state, ip address, netmask, and hwaddr)

    out
        Combined text output of ``ip link show`` and ``ip addr show``
        (interface records are split on a newline followed by a digit).
    '''
    ret = dict()

    def parse_network(value, cols):
        '''
        Return a tuple of ip, netmask, broadcast
        based on the current set of cols
        '''
        brd = None
        scope = None
        if '/' in value:  # we have a CIDR in this address
            ip, cidr = value.split('/')  # pylint: disable=C0103
        else:
            ip = value  # pylint: disable=C0103
            # No CIDR present; defaults to /32. NOTE(review): this default is
            # also used for inet6 addresses — presumably /128 was intended
            # there, confirm before changing.
            cidr = 32

        # `type_` is read from the enclosing loop's current line (closure),
        # not passed in as a parameter.
        if type_ == 'inet':
            mask = cidr_to_ipv4_netmask(int(cidr))
            if 'brd' in cols:
                brd = cols[cols.index('brd') + 1]
        elif type_ == 'inet6':
            # IPv6 keeps the raw prefix length rather than a dotted mask.
            mask = cidr
            if 'scope' in cols:
                scope = cols[cols.index('scope') + 1]
        return (ip, mask, brd, scope)

    # Each interface record begins with "<index>: <name>..." on a new line.
    groups = re.compile('\r?\n\\d').split(out)
    for group in groups:
        iface = None
        data = dict()

        for line in group.splitlines():
            if ' ' not in line:
                continue
            # Header line, e.g. "2: eth0@bond0: <BROADCAST,UP,...>"
            match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line)
            if match:
                iface, parent, attrs = match.groups()
                if 'UP' in attrs.split(','):
                    data['up'] = True
                else:
                    data['up'] = False
                if parent:
                    data['parent'] = parent
                continue

            cols = line.split()
            if len(cols) >= 2:
                type_, value = tuple(cols[0:2])
                # The last column carries the address label (iface or alias).
                iflabel = cols[-1:][0]
                if type_ in ('inet', 'inet6'):
                    if 'secondary' not in cols:
                        ipaddr, netmask, broadcast, scope = parse_network(value, cols)
                        if type_ == 'inet':
                            if 'inet' not in data:
                                data['inet'] = list()
                            addr_obj = dict()
                            addr_obj['address'] = ipaddr
                            addr_obj['netmask'] = netmask
                            addr_obj['broadcast'] = broadcast
                            addr_obj['label'] = iflabel
                            data['inet'].append(addr_obj)
                        elif type_ == 'inet6':
                            if 'inet6' not in data:
                                data['inet6'] = list()
                            addr_obj = dict()
                            addr_obj['address'] = ipaddr
                            addr_obj['prefixlen'] = netmask
                            addr_obj['scope'] = scope
                            data['inet6'].append(addr_obj)
                    else:
                        # Secondary (alias) addresses are collected separately.
                        if 'secondary' not in data:
                            data['secondary'] = list()
                        ip_, mask, brd, scp = parse_network(value, cols)
                        data['secondary'].append({
                            'type': type_,
                            'address': ip_,
                            'netmask': mask,
                            'broadcast': brd,
                            'label': iflabel,
                        })
                        del ip_, mask, brd, scp
                elif type_.startswith('link'):
                    # e.g. "link/ether aa:bb:cc:dd:ee:ff"
                    data['hwaddr'] = value
        if iface:
            ret[iface] = data
            del iface, data
    return ret
int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
def linux_interfaces():
    '''
    Obtain interface information for *NIX/BSD variants
    '''
    ifaces = dict()
    ip_path = salt.utils.path.which('ip')
    # Only look for ifconfig when `ip` is unavailable.
    ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig')
    if ip_path:
        link_out = subprocess.Popen(
            '{0} link show'.format(ip_path),
            shell=True,
            close_fds=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0]
        addr_out = subprocess.Popen(
            '{0} addr show'.format(ip_path),
            shell=True,
            close_fds=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0]
        # Feed the concatenated output of both commands to the parser.
        ifaces = _interfaces_ip("{0}\n{1}".format(
            salt.utils.stringutils.to_str(link_out),
            salt.utils.stringutils.to_str(addr_out)))
    elif ifconfig_path:
        ifconfig_out = subprocess.Popen(
            '{0} -a'.format(ifconfig_path),
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT).communicate()[0]
        ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(ifconfig_out))
    return ifaces
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
def get_net_size(mask):
    '''
    Turns an IPv4 netmask into it's corresponding prefix length
    (255.255.255.0 -> 24 as in 192.168.1.10/24).
    '''
    # Concatenate the 8-bit binary form of every octet, then count the
    # set prefix bits by trimming the trailing zeros.
    bits = ''.join(bin(int(octet))[2:].zfill(8) for octet in mask.split('.'))
    return len(bits.rstrip('0'))
def hw_addr(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface

    .. versionchanged:: 2016.11.4
        Added support for AIX

    iface
        Name of the interface to query.

    Returns the MAC address string, an empty string when the interface has no
    ``hwaddr`` entry, or an error message string when the interface is absent.
    '''
    if salt.utils.platform.is_aix():
        # Fixed: previously this returned the _hw_addr_aix function object
        # itself (missing call), not the interface's MAC address.
        return _hw_addr_aix(iface)

    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('hwaddr', '')
    else:
        return error
def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'):
    '''
    Return the full list of IP adresses matching the criteria

    proto = inet|inet6
    '''
    # Accept pre-fetched interface data, otherwise query the system.
    ifaces = interface_data \
        if isinstance(interface_data, dict) \
        else interfaces()
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces)
                              if k == interface])
        if not target_ifaces:
            log.error('Interface %s not found.', interface)

    ret = set()
    for ip_info in six.itervalues(target_ifaces):
        addrs = ip_info.get(proto, [])
        # Secondary (alias) addresses of the requested family count too.
        addrs.extend([entry for entry in ip_info.get('secondary', [])
                      if entry.get('type') == proto])

        for entry in addrs:
            ip_obj = ipaddress.ip_address(entry.get('address'))
            if include_loopback or not ip_obj.is_loopback:
                ret.add(ip_obj)
    # Sort as address objects, render as text.
    return [six.text_type(ip_obj) for ip_obj in sorted(ret)]
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
def _parse_tcp_line(line):
    '''
    Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6
    '''
    fields = line.strip().split()
    slot = fields[0].rstrip(':')
    # Addresses are "<hex ip>:<hex port>"; the kernel stores the IP
    # byte-swapped, hence invert=True on hex2ip.
    local_addr, local_port = fields[1].split(':')
    remote_addr, remote_port = fields[2].split(':')
    return {
        slot: {
            'local_addr': hex2ip(local_addr, True),
            'local_port': int(local_port, 16),
            'remote_addr': hex2ip(remote_addr, True),
            'remote_port': int(remote_port, 16),
            'state': int(fields[3], 16),
        }
    }
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
def _openbsd_remotes_on(port, which_end):
    '''
    OpenBSD specific helper function.
    Returns set of ipv4 host addresses of remote established connections
    on local or remote tcp port.

    Parses output of shell 'netstat' to get connections

    $ netstat -nf inet
    Active Internet connections
    Proto   Recv-Q Send-Q  Local Address          Foreign Address        (state)
    tcp          0      0  10.0.0.101.4505        10.0.0.1.45329         ESTABLISHED
    tcp          0      0  10.0.0.101.4505        10.0.0.100.50798       ESTABLISHED
    '''
    remotes = set()
    try:
        data = subprocess.check_output(['netstat', '-nf', 'inet'])  # pylint: disable=minimum-python-version
    except subprocess.CalledProcessError:
        log.error('Failed netstat')
        raise

    # Fixed: check_output() returns bytes on Python 3; decode before splitting
    # so the str-vs-str comparisons below work — every sibling *_remotes_on
    # helper already decodes via salt.utils.stringutils.to_str().
    lines = salt.utils.stringutils.to_str(data).split('\n')
    for line in lines:
        if 'ESTABLISHED' not in line:
            continue
        chunks = line.split()
        local_host, local_port = chunks[3].rsplit('.', 1)
        remote_host, remote_port = chunks[4].rsplit('.', 1)

        if which_end == 'remote_port' and int(remote_port) != port:
            continue
        if which_end == 'local_port' and int(local_port) != port:
            continue
        remotes.add(remote_host)
    return remotes
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
network_hosts
python
def network_hosts(value, options=None, version=None):
    '''
    Return the list of hosts within a network.

    .. note::

        When running this command with a large IPv6 network, the command will
        take a long time to gather all of the hosts.
    '''
    # Validate/normalise the input into a list of IP entries; no match means
    # nothing to enumerate, so fall through to an implicit None (the sibling
    # filters in this module share that convention).
    filtered = _filter_ipaddr(value, options=options, version=version)
    if not filtered:
        return
    # Iterable input yields one host list per entry; a scalar yields a single
    # host list for its (only) filtered entry.
    if isinstance(value, (list, tuple, types.GeneratorType)):
        return [_network_hosts(entry) for entry in filtered]
    return _network_hosts(filtered[0])
Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L536-L553
[ "def _filter_ipaddr(value, options, version=None):\n ipaddr_filter_out = None\n if version:\n if version == 4:\n ipaddr_filter_out = ipv4(value, options)\n elif version == 6:\n ipaddr_filter_out = ipv6(value, options)\n else:\n ipaddr_filter_out = ipaddr(value, options)\n if not ipaddr_filter_out:\n return\n if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)):\n ipaddr_filter_out = [ipaddr_filter_out]\n return ipaddr_filter_out\n", "def _network_hosts(ip_addr_entry):\n return [\n six.text_type(host)\n for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts()\n ]\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
as exc: log.debug('salt.utils.network.ip_to_host(%r) failed: %s', ip, exc) hostname = None return hostname # pylint: enable=C0103 def is_reachable_host(entity_name): ''' Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc). :param hostname: :return: ''' try: assert type(socket.getaddrinfo(entity_name, 0, 0, 0, 0)) == list ret = True except socket.gaierror: ret = False return ret def is_ip(ip): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. ''' return is_ipv4(ip) or is_ipv6(ip) def is_ipv4(ip): ''' Returns a bool telling if the value passed to it was a valid IPv4 address ''' try: return ipaddress.ip_address(ip).version == 4 except ValueError: return False def is_ipv6(ip): ''' Returns a bool telling if the value passed to it was a valid IPv6 address ''' try: return ipaddress.ip_address(ip).version == 6 except ValueError: return False def is_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 or IPv6 subnet ''' return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr) def is_ipv4_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv4 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv4Network(cidr)) except Exception: return False def is_ipv6_subnet(cidr): ''' Returns a bool telling if the passed string is an IPv6 subnet ''' try: return '/' in cidr and bool(ipaddress.IPv6Network(cidr)) except Exception: return False @jinja_filter('is_ip') def is_ip_filter(ip, options=None): ''' Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address. 
''' return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options) def _ip_options_global(ip_obj, version): return not ip_obj.is_private def _ip_options_multicast(ip_obj, version): return ip_obj.is_multicast def _ip_options_loopback(ip_obj, version): return ip_obj.is_loopback def _ip_options_link_local(ip_obj, version): return ip_obj.is_link_local def _ip_options_private(ip_obj, version): return ip_obj.is_private def _ip_options_reserved(ip_obj, version): return ip_obj.is_reserved def _ip_options_site_local(ip_obj, version): if version == 6: return ip_obj.is_site_local return False def _ip_options_unspecified(ip_obj, version): return ip_obj.is_unspecified def _ip_options(ip_obj, version, options=None): # will process and IP options options_fun_map = { 'global': _ip_options_global, 'link-local': _ip_options_link_local, 'linklocal': _ip_options_link_local, 'll': _ip_options_link_local, 'link_local': _ip_options_link_local, 'loopback': _ip_options_loopback, 'lo': _ip_options_loopback, 'multicast': _ip_options_multicast, 'private': _ip_options_private, 'public': _ip_options_global, 'reserved': _ip_options_reserved, 'site-local': _ip_options_site_local, 'sl': _ip_options_site_local, 'site_local': _ip_options_site_local, 'unspecified': _ip_options_unspecified } if not options: return six.text_type(ip_obj) # IP version already checked options_list = [option.strip() for option in options.split(',')] for option, fun in options_fun_map.items(): if option in options_list: fun_res = fun(ip_obj, version) if not fun_res: return None # stop at first failed test # else continue return six.text_type(ip_obj) def _is_ipv(ip, version, options=None): if not version: version = 4 if version not in (4, 6): return None try: ip_obj = ipaddress.ip_address(ip) except ValueError: # maybe it is an IP network try: ip_obj = ipaddress.ip_interface(ip) except ValueError: # nope, still not :( return None if not ip_obj.version == version: return None # has the right version, let's 
move on return _ip_options(ip_obj, version, options=options) @jinja_filter('is_ipv4') def is_ipv4_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv4 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv4 = _is_ipv(ip, 4, options=options) return isinstance(_is_ipv4, six.string_types) @jinja_filter('is_ipv6') def is_ipv6_filter(ip, options=None): ''' Returns a bool telling if the value passed to it was a valid IPv6 address. ip The IP address. net: False Consider IP addresses followed by netmask. options CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc. ''' _is_ipv6 = _is_ipv(ip, 6, options=options) return isinstance(_is_ipv6, six.string_types) def _ipv_filter(value, version, options=None): if version not in (4, 6): return if isinstance(value, (six.string_types, six.text_type, six.binary_type)): return _is_ipv(value, version, options=options) # calls is_ipv4 or is_ipv6 for `value` elif isinstance(value, (list, tuple, types.GeneratorType)): # calls is_ipv4 or is_ipv6 for each element in the list # os it filters and returns only those elements having the desired IP version return [ _is_ipv(addr, version, options=options) for addr in value if _is_ipv(addr, version, options=options) is not None ] return None @jinja_filter('ipv4') def ipv4(value, options=None): ''' Filters a list and returns IPv4 values only. ''' return _ipv_filter(value, 4, options=options) @jinja_filter('ipv6') def ipv6(value, options=None): ''' Filters a list and returns IPv6 values only. ''' return _ipv_filter(value, 6, options=options) @jinja_filter('ipaddr') def ipaddr(value, options=None): ''' Filters and returns only valid IP objects. 
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def network_size(value, options=None, version=None): ''' Get the size of a network. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ] def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! 
x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, 
scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else '{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( 
int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
(The IP can be any IP inside the subnet) ''' if netmask is not None: ipaddr = '{0}/{1}'.format(ipaddr, netmask) return six.text_type(ipaddress.ip_network(ipaddr, strict=False)) def _ipv4_to_bits(ipaddr): ''' Accepts an IPv4 dotted quad and returns a string representing its binary counterpart ''' return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')]) def _get_iface_info(iface): ''' If `iface` is available, return interface info and no error, otherwise return no info and log and return an error ''' iface_info = interfaces() if iface in iface_info.keys(): return iface_info, False else: error_msg = ('Interface "{0}" not in available interfaces: "{1}"' ''.format(iface, '", "'.join(iface_info.keys()))) log.error(error_msg) return None, error_msg def _hw_addr_aix(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface on AIX MAC address not available in through interfaces ''' cmd = subprocess.Popen( 'entstat -d {0} | grep \'Hardware Address\''.format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] if cmd: comps = cmd.split(' ') if len(comps) == 3: mac_addr = comps[2].strip('\'').strip() return mac_addr error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface)) log.error(error_msg) return error_msg def hw_addr(iface): ''' Return the hardware address (a.k.a. MAC address) for a given interface .. 
versionchanged:: 2016.11.4 Added support for AIX ''' if salt.utils.platform.is_aix(): return _hw_addr_aix iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('hwaddr', '') else: return error def interface(iface): ''' Return the details of `iface` or an error if it does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: return iface_info.get(iface, {}).get('inet', '') else: return error def interface_ip(iface): ''' Return `iface` IPv4 addr or an error if `iface` does not exist ''' iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error def _subnets(proto='inet', interfaces_=None): ''' Returns a list of subnets to which the host belongs ''' if interfaces_ is None: ifaces = interfaces() elif isinstance(interfaces_, list): ifaces = {} for key, value in six.iteritems(interfaces()): if key in interfaces_: ifaces[key] = value else: ifaces = {interfaces_: interfaces().get(interfaces_, {})} ret = set() if proto == 'inet': subnet = 'netmask' dflt_cidr = 32 elif proto == 'inet6': subnet = 'prefixlen' dflt_cidr = 128 else: log.error('Invalid proto %s calling subnets()', proto) return for ip_info in six.itervalues(ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for intf in addrs: if subnet in intf: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet])) else: intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr)) if not intf.is_loopback: ret.add(intf.network) return [six.text_type(net) for net in sorted(ret)] def subnets(interfaces=None): ''' Returns a list of IPv4 subnets to which the host belongs ''' return _subnets('inet', interfaces_=interfaces) def subnets6(): ''' Returns a list of IPv6 subnets to which the host belongs ''' return _subnets('inet6') def 
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
''' return _ip_addrs(interface, include_loopback, interface_data, 'inet6') def hex2ip(hex_ip, invert=False): ''' Convert a hex string to an ip, if a failure occurs the original hex is returned. If 'invert=True' assume that ip from /proc/net/<proto> ''' if len(hex_ip) == 32: # ipv6 ip = [] for i in range(0, 32, 8): ip_part = hex_ip[i:i + 8] ip_part = [ip_part[x:x + 2] for x in range(0, 8, 2)] if invert: ip.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(ip_part)) else: ip.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(ip_part)) try: address = ipaddress.IPv6Address(":".join(ip)) if address.ipv4_mapped: return str(address.ipv4_mapped) else: return address.compressed except ipaddress.AddressValueError as ex: log.error('hex2ip - ipv6 address error: %s', ex) return hex_ip try: hip = int(hex_ip, 16) except ValueError: return hex_ip if invert: return '{3}.{2}.{1}.{0}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) return '{0}.{1}.{2}.{3}'.format(hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255) def mac2eui64(mac, prefix=None): ''' Convert a MAC address to a EUI64 identifier or, with prefix provided, a full IPv6 address ''' # http://tools.ietf.org/html/rfc4291#section-2.5.1 eui64 = re.sub(r'[.:-]', '', mac).lower() eui64 = eui64[0:6] + 'fffe' + eui64[6:] eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:] if prefix is None: return ':'.join(re.findall(r'.{4}', eui64)) else: try: net = ipaddress.ip_network(prefix, strict=False) euil = int('0x{0}'.format(eui64), 16) return '{0}/{1}'.format(net[euil], net.prefixlen) except Exception: return def active_tcp(): ''' Return a dict describing all active tcp connections as quickly as possible ''' ret = {} for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): with salt.utils.files.fopen(statf, 'rb') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl]['state'] 
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
saltstack/salt
salt/utils/network.py
network_size
python
def network_size(value, options=None, version=None): ''' Get the size of a network. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_size(ipaddr_filter_out[0]) return [ _network_size(ip_a) for ip_a in ipaddr_filter_out ]
Get the size of a network.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L561-L573
[ "def _filter_ipaddr(value, options, version=None):\n ipaddr_filter_out = None\n if version:\n if version == 4:\n ipaddr_filter_out = ipv4(value, options)\n elif version == 6:\n ipaddr_filter_out = ipv6(value, options)\n else:\n ipaddr_filter_out = ipaddr(value, options)\n if not ipaddr_filter_out:\n return\n if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)):\n ipaddr_filter_out = [ipaddr_filter_out]\n return ipaddr_filter_out\n", "def _network_size(ip_addr_entry):\n return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses\n" ]
# -*- coding: utf-8 -*- ''' Define some generic socket functions for network modules ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function import itertools import os import re import types import socket import logging import platform import random import subprocess from string import ascii_letters, digits # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin # Attempt to import wmi try: import wmi import salt.utils.winapi except ImportError: pass # Import salt libs import salt.utils.args import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.stringutils import salt.utils.zeromq from salt._compat import ipaddress from salt.exceptions import SaltClientError, SaltSystemExit from salt.utils.decorators.jinja import jinja_filter from salt.utils.versions import LooseVersion # inet_pton does not exist in Windows, this is a workaround if salt.utils.platform.is_windows(): from salt.ext import win_inet_pton # pylint: disable=unused-import log = logging.getLogger(__name__) try: import ctypes import ctypes.util libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c")) res_init = libc.__res_init except (ImportError, OSError, AttributeError, TypeError): pass # pylint: disable=C0103 def sanitize_host(host): ''' Sanitize host string. https://tools.ietf.org/html/rfc1123#section-2.1 ''' RFC952_characters = ascii_letters + digits + ".-" return "".join([c for c in host[0:255] if c in RFC952_characters]) def isportopen(host, port): ''' Return status of a port ''' if not 1 <= int(port) <= 65535: return False sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) out = sock.connect_ex((sanitize_host(host), int(port))) return out def host_to_ips(host): ''' Returns a list of IP addresses of a given hostname or None if not found. 
''' ips = [] try: for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo( host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM): if family == socket.AF_INET: ip, port = sockaddr elif family == socket.AF_INET6: ip, port, flow_info, scope_id = sockaddr ips.append(ip) if not ips: ips = None except Exception: ips = None return ips def _generate_minion_id(): ''' Get list of possible host names and convention names. :return: ''' # There are three types of hostnames: # 1. Network names. How host is accessed from the network. # 2. Host aliases. They might be not available in all the network or only locally (/etc/hosts) # 3. Convention names, an internal nodename. class DistinctList(list): ''' List, which allows one to append only distinct objects. Needs to work on Python 2.6, because of collections.OrderedDict only since 2.7 version. Override 'filter()' for custom filtering. ''' localhost_matchers = [r'localhost.*', r'ip6-.*', r'127[.]\d', r'0\.0\.0\.0', r'::1.*', r'ipv6-.*', r'fe00::.*', r'fe02::.*', r'1.0.0.*.ip6.arpa'] def append(self, p_object): if p_object and p_object not in self and not self.filter(p_object): super(DistinctList, self).append(p_object) return self def extend(self, iterable): for obj in iterable: self.append(obj) return self def filter(self, element): 'Returns True if element needs to be filtered' for rgx in self.localhost_matchers: if re.match(rgx, element): return True def first(self): return self and self[0] or None hostname = socket.gethostname() hosts = DistinctList().append( salt.utils.stringutils.to_unicode(socket.getfqdn(salt.utils.stringutils.to_bytes(hostname))) ).append(platform.node()).append(hostname) if not hosts: try: for a_nfo in socket.getaddrinfo(hosts.first() or 'localhost', None, socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP, socket.AI_CANONNAME): if len(a_nfo) > 3: hosts.append(a_nfo[3]) except socket.gaierror: log.warning('Cannot resolve address %s info via socket: %s', hosts.first() or 'localhost (N/A)', 
socket.gaierror) # Universal method for everywhere (Linux, Slowlaris, Windows etc) for f_name in ('/etc/hostname', '/etc/nodename', '/etc/hosts', r'{win}\system32\drivers\etc\hosts'.format(win=os.getenv('WINDIR'))): try: with salt.utils.files.fopen(f_name) as f_hdl: for line in f_hdl: line = salt.utils.stringutils.to_unicode(line) hst = line.strip().split('#')[0].strip().split() if hst: if hst[0][:4] in ('127.', '::1') or len(hst) == 1: hosts.extend(hst) except IOError: pass # include public and private ipaddresses return hosts.extend([addr for addr in ip_addrs() if not ipaddress.ip_address(addr).is_loopback]) def generate_minion_id(): ''' Return only first element of the hostname from all possible list. :return: ''' try: ret = salt.utils.stringutils.to_unicode(_generate_minion_id().first()) except TypeError: ret = None return ret or 'localhost' def get_socket(addr, type=socket.SOCK_STREAM, proto=0): ''' Return a socket object for the addr IP-version agnostic ''' version = ipaddress.ip_address(addr).version if version == 4: family = socket.AF_INET elif version == 6: family = socket.AF_INET6 return socket.socket(family, type, proto) def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn def ip_to_host(ip): ''' Returns the hostname of a given IP ''' try: hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) except Exception 
def is_reachable_host(entity_name):
    '''
    Returns a bool telling if the entity name is a reachable host (IPv4/IPv6/FQDN/etc).

    :param entity_name: Hostname or IP address to resolve.
    :return: ``True`` if the name resolves, ``False`` otherwise.
    '''
    # Bugfix: the success check previously used ``assert``, which is
    # stripped when Python runs with -O and made this always return True.
    try:
        socket.getaddrinfo(entity_name, 0, 0, 0, 0)
        return True
    except socket.gaierror:
        return False


def is_ip(ip):
    '''
    Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
    '''
    return is_ipv4(ip) or is_ipv6(ip)


def is_ipv4(ip):
    '''
    Returns a bool telling if the value passed to it was a valid IPv4 address
    '''
    try:
        return ipaddress.ip_address(ip).version == 4
    except ValueError:
        return False


def is_ipv6(ip):
    '''
    Returns a bool telling if the value passed to it was a valid IPv6 address
    '''
    try:
        return ipaddress.ip_address(ip).version == 6
    except ValueError:
        return False


def is_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv4 or IPv6 subnet
    '''
    return is_ipv4_subnet(cidr) or is_ipv6_subnet(cidr)


def is_ipv4_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv4 subnet
    '''
    try:
        # Require an explicit prefix; a bare address is not a subnet
        return '/' in cidr and bool(ipaddress.IPv4Network(cidr))
    except Exception:
        return False


def is_ipv6_subnet(cidr):
    '''
    Returns a bool telling if the passed string is an IPv6 subnet
    '''
    try:
        # Require an explicit prefix; a bare address is not a subnet
        return '/' in cidr and bool(ipaddress.IPv6Network(cidr))
    except Exception:
        return False
@jinja_filter('is_ip')
def is_ip_filter(ip, options=None):
    '''
    Returns a bool telling if the passed IP is a valid IPv4 or IPv6 address.
    '''
    return is_ipv4_filter(ip, options=options) or is_ipv6_filter(ip, options=options)


def _ip_options_global(ip_obj, version):
    # "global" (public) is treated as the complement of "private"
    return not ip_obj.is_private


def _ip_options_multicast(ip_obj, version):
    return ip_obj.is_multicast


def _ip_options_loopback(ip_obj, version):
    return ip_obj.is_loopback


def _ip_options_link_local(ip_obj, version):
    return ip_obj.is_link_local


def _ip_options_private(ip_obj, version):
    return ip_obj.is_private


def _ip_options_reserved(ip_obj, version):
    return ip_obj.is_reserved


def _ip_options_site_local(ip_obj, version):
    # site-local only exists for IPv6
    if version == 6:
        return ip_obj.is_site_local
    return False


def _ip_options_unspecified(ip_obj, version):
    return ip_obj.is_unspecified


def _ip_options(ip_obj, version, options=None):
    '''
    Apply the CSV of option filters to an already-validated IP object.

    Returns the textual form of the address when every requested (and
    recognized) option check passes, otherwise ``None``.
    '''
    # Maps each supported option keyword (and its aliases) to its check.
    option_checks = {
        'global': _ip_options_global,
        'link-local': _ip_options_link_local,
        'linklocal': _ip_options_link_local,
        'll': _ip_options_link_local,
        'link_local': _ip_options_link_local,
        'loopback': _ip_options_loopback,
        'lo': _ip_options_loopback,
        'multicast': _ip_options_multicast,
        'private': _ip_options_private,
        'public': _ip_options_global,
        'reserved': _ip_options_reserved,
        'site-local': _ip_options_site_local,
        'sl': _ip_options_site_local,
        'site_local': _ip_options_site_local,
        'unspecified': _ip_options_unspecified
    }

    if not options:
        # IP version already checked by the caller
        return six.text_type(ip_obj)

    requested = [option.strip() for option in options.split(',')]
    for keyword, check in option_checks.items():
        if keyword in requested and not check(ip_obj, version):
            # stop at the first failed test
            return None
    return six.text_type(ip_obj)


def _is_ipv(ip, version, options=None):
    '''
    Validate ``ip`` as an address (or network/interface) of the requested
    IP ``version`` and apply the option filters; returns the textual
    address or ``None``.
    '''
    if not version:
        version = 4
    if version not in (4, 6):
        return None

    try:
        ip_obj = ipaddress.ip_address(ip)
    except ValueError:
        # maybe it is an IP network
        try:
            ip_obj = ipaddress.ip_interface(ip)
        except ValueError:
            # nope, still not :(
            return None

    if ip_obj.version != version:
        return None

    # has the right version, let's move on
    return _ip_options(ip_obj, version, options=options)


@jinja_filter('is_ipv4')
def is_ipv4_filter(ip, options=None):
    '''
    Returns a bool telling if the value passed to it was a valid IPv4 address.

    ip
        The IP address.

    net: False
        Consider IP addresses followed by netmask.

    options
        CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
    '''
    return isinstance(_is_ipv(ip, 4, options=options), six.string_types)


@jinja_filter('is_ipv6')
def is_ipv6_filter(ip, options=None):
    '''
    Returns a bool telling if the value passed to it was a valid IPv6 address.

    ip
        The IP address.

    net: False
        Consider IP addresses followed by netmask.

    options
        CSV of options regarding the nature of the IP address. E.g.: loopback, multicast, private etc.
    '''
    return isinstance(_is_ipv(ip, 6, options=options), six.string_types)


def _ipv_filter(value, version, options=None):
    '''
    Shared implementation for the ``ipv4`` / ``ipv6`` Jinja filters:
    validates a single value, or filters a sequence down to the addresses
    of the requested version.
    '''
    if version not in (4, 6):
        return None

    if isinstance(value, (six.string_types, six.text_type, six.binary_type)):
        # calls is_ipv4 or is_ipv6 for `value`
        return _is_ipv(value, version, options=options)
    if isinstance(value, (list, tuple, types.GeneratorType)):
        # keep only the elements having the desired IP version
        candidates = (_is_ipv(addr, version, options=options) for addr in value)
        return [addr for addr in candidates if addr is not None]
    return None


@jinja_filter('ipv4')
def ipv4(value, options=None):
    '''
    Filters a list and returns IPv4 values only.
    '''
    return _ipv_filter(value, 4, options=options)


@jinja_filter('ipv6')
def ipv6(value, options=None):
    '''
    Filters a list and returns IPv6 values only.
    '''
    return _ipv_filter(value, 6, options=options)
''' ipv4_obj = ipv4(value, options=options) ipv6_obj = ipv6(value, options=options) if ipv4_obj is None or ipv6_obj is None: # an IP address can be either IPv4 either IPv6 # therefofe if the value passed as arg is not a list, at least one of the calls above will return None # if one of them is none, means that we should return only one of them return ipv4_obj or ipv6_obj # one of them else: return ipv4_obj + ipv6_obj # extend lists def _filter_ipaddr(value, options, version=None): ipaddr_filter_out = None if version: if version == 4: ipaddr_filter_out = ipv4(value, options) elif version == 6: ipaddr_filter_out = ipv6(value, options) else: ipaddr_filter_out = ipaddr(value, options) if not ipaddr_filter_out: return if not isinstance(ipaddr_filter_out, (list, tuple, types.GeneratorType)): ipaddr_filter_out = [ipaddr_filter_out] return ipaddr_filter_out @jinja_filter('ip_host') def ip_host(value, options=None, version=None): ''' Returns the interfaces IP address, e.g.: 192.168.0.1/28. ''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return six.text_type(ipaddress.ip_interface(ipaddr_filter_out[0])) return [six.text_type(ipaddress.ip_interface(ip_a)) for ip_a in ipaddr_filter_out] def _network_hosts(ip_addr_entry): return [ six.text_type(host) for host in ipaddress.ip_network(ip_addr_entry, strict=False).hosts() ] @jinja_filter('network_hosts') def network_hosts(value, options=None, version=None): ''' Return the list of hosts within a network. .. note:: When running this command with a large IPv6 network, the command will take a long time to gather all of the hosts. 
''' ipaddr_filter_out = _filter_ipaddr(value, options=options, version=version) if not ipaddr_filter_out: return if not isinstance(value, (list, tuple, types.GeneratorType)): return _network_hosts(ipaddr_filter_out[0]) return [ _network_hosts(ip_a) for ip_a in ipaddr_filter_out ] def _network_size(ip_addr_entry): return ipaddress.ip_network(ip_addr_entry, strict=False).num_addresses @jinja_filter('network_size') def natural_ipv4_netmask(ip, fmt='prefixlen'): ''' Returns the "natural" mask of an IPv4 address ''' bits = _ipv4_to_bits(ip) if bits.startswith('11'): mask = '24' elif bits.startswith('1'): mask = '16' else: mask = '8' if fmt == 'netmask': return cidr_to_ipv4_netmask(mask) else: return '/' + mask def rpad_ipv4_network(ip): ''' Returns an IP network address padded with zeros. Ex: '192.168.3' -> '192.168.3.0' '10.209' -> '10.209.0.0' ''' return '.'.join(itertools.islice(itertools.chain(ip.split('.'), '0000'), 0, 4)) def cidr_to_ipv4_netmask(cidr_bits): ''' Returns an IPv4 netmask ''' try: cidr_bits = int(cidr_bits) if not 1 <= cidr_bits <= 32: return '' except ValueError: return '' netmask = '' for idx in range(4): if idx: netmask += '.' if cidr_bits >= 8: netmask += '255' cidr_bits -= 8 else: netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) cidr_bits = 0 return netmask def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 ''' Returns an IPv4 netmask from the integer representation of that mask. Ex. 0xffffff00 -> '255.255.255.0' ''' return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) # pylint: disable=C0103 def _number_of_set_bits(x): ''' Returns the number of bits that are set in a 32bit int ''' # Taken from http://stackoverflow.com/a/4912729. Many thanks! 
x -= (x >> 1) & 0x55555555 x = ((x >> 2) & 0x33333333) + (x & 0x33333333) x = ((x >> 4) + x) & 0x0f0f0f0f x += x >> 8 x += x >> 16 return x & 0x0000003f # pylint: enable=C0103 def _interfaces_ip(out): ''' Uses ip to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() def parse_network(value, cols): ''' Return a tuple of ip, netmask, broadcast based on the current set of cols ''' brd = None scope = None if '/' in value: # we have a CIDR in this address ip, cidr = value.split('/') # pylint: disable=C0103 else: ip = value # pylint: disable=C0103 cidr = 32 if type_ == 'inet': mask = cidr_to_ipv4_netmask(int(cidr)) if 'brd' in cols: brd = cols[cols.index('brd') + 1] elif type_ == 'inet6': mask = cidr if 'scope' in cols: scope = cols[cols.index('scope') + 1] return (ip, mask, brd, scope) groups = re.compile('\r?\n\\d').split(out) for group in groups: iface = None data = dict() for line in group.splitlines(): if ' ' not in line: continue match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) if match: iface, parent, attrs = match.groups() if 'UP' in attrs.split(','): data['up'] = True else: data['up'] = False if parent: data['parent'] = parent continue cols = line.split() if len(cols) >= 2: type_, value = tuple(cols[0:2]) iflabel = cols[-1:][0] if type_ in ('inet', 'inet6'): if 'secondary' not in cols: ipaddr, netmask, broadcast, scope = parse_network(value, cols) if type_ == 'inet': if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['netmask'] = netmask addr_obj['broadcast'] = broadcast addr_obj['label'] = iflabel data['inet'].append(addr_obj) elif type_ == 'inet6': if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = ipaddr addr_obj['prefixlen'] = netmask addr_obj['scope'] = scope data['inet6'].append(addr_obj) else: if 'secondary' not in data: data['secondary'] = list() ip_, mask, brd, 
scp = parse_network(value, cols) data['secondary'].append({ 'type': type_, 'address': ip_, 'netmask': mask, 'broadcast': brd, 'label': iflabel, }) del ip_, mask, brd, scp elif type_.startswith('link'): data['hwaddr'] = value if iface: ret[iface] = data del iface, data return ret def _interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') if salt.utils.platform.is_sunos(): pip = re.compile(r'.*?(?:inet\s+)([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(.*)') pip6 = re.compile('.*?(?:inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 [0-9a-fA-F:]+/(\d+)).*') else: pip = re.compile(r'.*?(?:inet addr:|inet [^\d]*)(.*?)\s') pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+))(?: Scope:([a-zA-Z]+)| scopeid (0x[0-9a-fA-F]))?') pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') pupdown = re.compile('UP') pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if salt.utils.platform.is_sunos(): expand_mac = [] for chunk in data['hwaddr'].split(':'): expand_mac.append('0{0}'.format(chunk) if len(chunk) < 2 else '{0}'.format(chunk)) data['hwaddr'] = ':'.join(expand_mac) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = pmask.match(line) if mmask: if mmask.group(1): mmask = _number_of_set_bits_to_ipv4_netmask( 
int(mmask.group(1), 16)) else: mmask = mmask.group(2) addr_obj['netmask'] = mmask mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) or mip6.group(2) mmask6 = pmask6.match(line) if mmask6: addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) if not salt.utils.platform.is_sunos(): ipv6scope = mmask6.group(3) or mmask6.group(4) addr_obj['scope'] = ipv6scope.lower() if ipv6scope is not None else ipv6scope # SunOS sometimes has ::/0 as inet6 addr when using addrconf if not salt.utils.platform.is_sunos() \ or addr_obj['address'] != '::' \ and addr_obj['prefixlen'] != 0: data['inet6'].append(addr_obj) data['up'] = updown if iface in ret: # SunOS optimization, where interfaces occur twice in 'ifconfig -a' # output with the same name: for ipv4 and then for ipv6 addr family. # Every instance has it's own 'UP' status and we assume that ipv4 # status determines global interface status. 
# # merge items with higher priority for older values # after that merge the inet and inet6 sub items for both ret[iface] = dict(list(data.items()) + list(ret[iface].items())) if 'inet' in data: ret[iface]['inet'].extend(x for x in data['inet'] if x not in ret[iface]['inet']) if 'inet6' in data: ret[iface]['inet6'].extend(x for x in data['inet6'] if x not in ret[iface]['inet6']) else: ret[iface] = data del data return ret def linux_interfaces(): ''' Obtain interface information for *NIX/BSD variants ''' ifaces = dict() ip_path = salt.utils.path.which('ip') ifconfig_path = None if ip_path else salt.utils.path.which('ifconfig') if ip_path: cmd1 = subprocess.Popen( '{0} link show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] cmd2 = subprocess.Popen( '{0} addr show'.format(ip_path), shell=True, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ip("{0}\n{1}".format( salt.utils.stringutils.to_str(cmd1), salt.utils.stringutils.to_str(cmd2))) elif ifconfig_path: cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] ifaces = _interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) return ifaces def _netbsd_interfaces_ifconfig(out): ''' Uses ifconfig to return a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) ''' ret = dict() piface = re.compile(r'^([^\s:]+)') pmac = re.compile('.*?address: ([0-9a-f:]+)') pip = re.compile(r'.*?inet [^\d]*(.*?)/([\d]*)\s') pip6 = re.compile(r'.*?inet6 ([0-9a-f:]+)%([a-zA-Z0-9]*)/([\d]*)\s') pupdown = re.compile('UP') pbcast = re.compile(r'.*?broadcast ([\d\.]+)') groups = re.compile('\r?\n(?=\\S)').split(out) for group in groups: data = dict() iface = '' updown = False for line in group.splitlines(): miface = piface.match(line) mmac = pmac.match(line) mip = pip.match(line) mip6 = 
pip6.match(line) mupdown = pupdown.search(line) if miface: iface = miface.group(1) if mmac: data['hwaddr'] = mmac.group(1) if mip: if 'inet' not in data: data['inet'] = list() addr_obj = dict() addr_obj['address'] = mip.group(1) mmask = mip.group(2) if mip.group(2): addr_obj['netmask'] = cidr_to_ipv4_netmask(mip.group(2)) mbcast = pbcast.match(line) if mbcast: addr_obj['broadcast'] = mbcast.group(1) data['inet'].append(addr_obj) if mupdown: updown = True if mip6: if 'inet6' not in data: data['inet6'] = list() addr_obj = dict() addr_obj['address'] = mip6.group(1) mmask6 = mip6.group(3) addr_obj['scope'] = mip6.group(2) addr_obj['prefixlen'] = mip6.group(3) data['inet6'].append(addr_obj) data['up'] = updown ret[iface] = data del data return ret def netbsd_interfaces(): ''' Obtain interface information for NetBSD >= 8 where the ifconfig output diverged from other BSD variants (Netmask is now part of the address) ''' # NetBSD versions prior to 8.0 can still use linux_interfaces() if LooseVersion(os.uname()[2]) < LooseVersion('8.0'): return linux_interfaces() ifconfig_path = salt.utils.path.which('ifconfig') cmd = subprocess.Popen( '{0} -a'.format(ifconfig_path), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0] return _netbsd_interfaces_ifconfig(salt.utils.stringutils.to_str(cmd)) def _interfaces_ipconfig(out): ''' Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) NOTE: This is not used by any function and may be able to be removed in the future. 
''' ifaces = dict() iface = None adapter_iface_regex = re.compile(r'adapter (\S.+):$') for line in out.splitlines(): if not line: continue # TODO what does Windows call Infiniband and 10/40gige adapters if line.startswith('Ethernet'): iface = ifaces[adapter_iface_regex.search(line).group(1)] iface['up'] = True addr = None continue if iface: key, val = line.split(',', 1) key = key.strip(' .') val = val.strip() if addr and key == 'Subnet Mask': addr['netmask'] = val elif key in ('IP Address', 'IPv4 Address'): if 'inet' not in iface: iface['inet'] = list() addr = {'address': val.rstrip('(Preferred)'), 'netmask': None, 'broadcast': None} # TODO find the broadcast iface['inet'].append(addr) elif 'IPv6 Address' in key: if 'inet6' not in iface: iface['inet'] = list() # XXX What is the prefixlen!? addr = {'address': val.rstrip('(Preferred)'), 'prefixlen': None} iface['inet6'].append(addr) elif key == 'Physical Address': iface['hwaddr'] = val elif key == 'Media State': # XXX seen used for tunnel adaptors # might be useful iface['up'] = (val != 'Media disconnected') def win_interfaces(): ''' Obtain interface information for Windows systems ''' with salt.utils.winapi.Com(): c = wmi.WMI() ifaces = {} for iface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1): ifaces[iface.Description] = dict() if iface.MACAddress: ifaces[iface.Description]['hwaddr'] = iface.MACAddress if iface.IPEnabled: ifaces[iface.Description]['up'] = True for ip in iface.IPAddress: if '.' in ip: if 'inet' not in ifaces[iface.Description]: ifaces[iface.Description]['inet'] = [] item = {'address': ip, 'label': iface.Description} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if '.' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if '.' 
in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet'].append(item) if ':' in ip: if 'inet6' not in ifaces[iface.Description]: ifaces[iface.Description]['inet6'] = [] item = {'address': ip} if iface.DefaultIPGateway: broadcast = next((i for i in iface.DefaultIPGateway if ':' in i), '') if broadcast: item['broadcast'] = broadcast if iface.IPSubnet: netmask = next((i for i in iface.IPSubnet if ':' in i), '') if netmask: item['netmask'] = netmask ifaces[iface.Description]['inet6'].append(item) else: ifaces[iface.Description]['up'] = False return ifaces def interfaces(): ''' Return a dictionary of information about all the interfaces on the minion ''' if salt.utils.platform.is_windows(): return win_interfaces() elif salt.utils.platform.is_netbsd(): return netbsd_interfaces() else: return linux_interfaces() def get_net_start(ipaddr, netmask): ''' Return the address of the network ''' net = ipaddress.ip_network('{0}/{1}'.format(ipaddr, netmask), strict=False) return six.text_type(net.network_address) def get_net_size(mask): ''' Turns an IPv4 netmask into it's corresponding prefix length (255.255.255.0 -> 24 as in 192.168.1.10/24). ''' binary_str = '' for octet in mask.split('.'): binary_str += bin(int(octet))[2:].zfill(8) return len(binary_str.rstrip('0')) def calc_net(ipaddr, netmask=None): ''' Takes IP (CIDR notation supported) and optionally netmask and returns the network in CIDR-notation. 
def _get_iface_info(iface):
    '''
    If `iface` is available, return interface info and no error, otherwise
    return no info and log and return an error

    :param iface: Interface name to look up.
    :return: Tuple of (interfaces dict, False) on success, or
        (None, error message string) when the interface does not exist.
    '''
    iface_info = interfaces()

    if iface in iface_info.keys():
        return iface_info, False
    else:
        error_msg = ('Interface "{0}" not in available interfaces: "{1}"'
                     ''.format(iface, '", "'.join(iface_info.keys())))
        log.error(error_msg)
        return None, error_msg


def _hw_addr_aix(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface on AIX
    MAC address not available in through interfaces

    :param iface: Interface name (passed to ``entstat``).
    :return: MAC address string, or an error message string on failure.
    '''
    cmd = subprocess.Popen(
        'entstat -d {0} | grep \'Hardware Address\''.format(iface),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT).communicate()[0]

    if cmd:
        # Bugfix: communicate() returns bytes on Python 3; decode before
        # splitting, otherwise bytes.split(' ') raises TypeError.
        comps = salt.utils.stringutils.to_str(cmd).split(' ')
        if len(comps) == 3:
            mac_addr = comps[2].strip('\'').strip()
            return mac_addr

    error_msg = ('Interface "{0}" either not available or does not contain a hardware address'.format(iface))
    log.error(error_msg)
    return error_msg


def hw_addr(iface):
    '''
    Return the hardware address (a.k.a. MAC address) for a given interface

    .. versionchanged:: 2016.11.4
        Added support for AIX

    :param iface: Interface name.
    :return: MAC address string ('' if the interface has none), or an
        error message string when the interface does not exist.
    '''
    if salt.utils.platform.is_aix():
        # Bugfix: previously returned the function object itself
        # (``return _hw_addr_aix``) instead of calling it.
        return _hw_addr_aix(iface)

    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('hwaddr', '')
    else:
        return error


def interface(iface):
    '''
    Return the details of `iface` or an error if it does not exist

    :param iface: Interface name.
    :return: List of inet address dicts, '' when none, or an error string.
    '''
    iface_info, error = _get_iface_info(iface)

    if error is False:
        return iface_info.get(iface, {}).get('inet', '')
    else:
        return error


def interface_ip(iface):
    '''
    Return `iface` IPv4 addr or an error if `iface` does not exist

    :param iface: Interface name.
    :return: First IPv4 address string, '' when none, or an error string.
    '''
    iface_info, error = _get_iface_info(iface)

    if error is False:
        inet = iface_info.get(iface, {}).get('inet', None)
        return inet[0].get('address', '') if inet else ''
    else:
        return error
in_subnet(cidr, addr=None): ''' Returns True if host or (any of) addrs is within specified subnet, otherwise False ''' try: cidr = ipaddress.ip_network(cidr) except ValueError: log.error('Invalid CIDR \'%s\'', cidr) return False if addr is None: addr = ip_addrs() addr.extend(ip_addrs6()) elif not isinstance(addr, (list, tuple)): addr = (addr,) return any(ipaddress.ip_address(item) in cidr for item in addr) def _ip_addrs(interface=None, include_loopback=False, interface_data=None, proto='inet'): ''' Return the full list of IP adresses matching the criteria proto = inet|inet6 ''' ret = set() ifaces = interface_data \ if isinstance(interface_data, dict) \ else interfaces() if interface is None: target_ifaces = ifaces else: target_ifaces = dict([(k, v) for k, v in six.iteritems(ifaces) if k == interface]) if not target_ifaces: log.error('Interface %s not found.', interface) for ip_info in six.itervalues(target_ifaces): addrs = ip_info.get(proto, []) addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto]) for addr in addrs: addr = ipaddress.ip_address(addr.get('address')) if not addr.is_loopback or include_loopback: ret.add(addr) return [six.text_type(addr) for addr in sorted(ret)] def ip_addrs(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. ''' return _ip_addrs(interface, include_loopback, interface_data, 'inet') def ip_addrs6(interface=None, include_loopback=False, interface_data=None): ''' Returns a list of IPv6 addresses assigned to the host. ::1 is ignored, unless 'include_loopback=True' is indicated. If 'interface' is provided, then only IP addresses from that interface will be returned. 
def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>
    '''
    if len(hex_ip) == 32:  # ipv6
        quads = []
        for offset in range(0, 32, 8):
            # Split each 8-hex-digit group into its four byte pairs
            pairs = [hex_ip[offset + idx:offset + idx + 2] for idx in range(0, 8, 2)]
            if invert:
                # /proc stores the bytes of each 32-bit word little-endian
                quads.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(pairs))
            else:
                quads.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(pairs))
        try:
            address = ipaddress.IPv6Address(":".join(quads))
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: %s', ex)
            return hex_ip
        if address.ipv4_mapped:
            return str(address.ipv4_mapped)
        return address.compressed

    try:
        hip = int(hex_ip, 16)
    except ValueError:
        return hex_ip

    octets = (hip >> 24 & 255, hip >> 16 & 255, hip >> 8 & 255, hip & 255)
    if invert:
        return '{3}.{2}.{1}.{0}'.format(*octets)
    return '{0}.{1}.{2}.{3}'.format(*octets)


def mac2eui64(mac, prefix=None):
    '''
    Convert a MAC address to a EUI64 identifier
    or, with prefix provided, a full IPv6 address
    '''
    # http://tools.ietf.org/html/rfc4291#section-2.5.1
    eui64 = re.sub(r'[.:-]', '', mac).lower()
    eui64 = eui64[0:6] + 'fffe' + eui64[6:]
    # Flip the universal/local bit of the first octet
    eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]

    if prefix is None:
        return ':'.join(re.findall(r'.{4}', eui64))
    try:
        net = ipaddress.ip_network(prefix, strict=False)
        euil = int('0x{0}'.format(eui64), 16)
        return '{0}/{1}'.format(net[euil], net.prefixlen)
    except Exception:
        return
== 1: # 1 is ESTABLISHED del iret[sl]['state'] ret[len(ret)] = iret[sl] return ret def local_port_tcp(port): ''' Return a set of remote ip addrs attached to the specified local port ''' ret = _remotes_on(port, 'local_port') return ret def remote_port_tcp(port): ''' Return a set of ip addrs the current host is connected to on given port ''' ret = _remotes_on(port, 'remote_port') return ret def _remotes_on(port, which_end): ''' Return a set of ip addrs active tcp connections ''' port = int(port) ret = _netlink_tool_remote_on(port, which_end) if ret is not None: return ret ret = set() proc_available = False for statf in ['/proc/net/tcp', '/proc/net/tcp6']: if os.path.isfile(statf): proc_available = True with salt.utils.files.fopen(statf, 'r') as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.strip().startswith('sl'): continue iret = _parse_tcp_line(line) sl = next(iter(iret)) if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED ret.add(iret[sl]['remote_addr']) if not proc_available: # Fallback to use OS specific tools if salt.utils.platform.is_sunos(): return _sunos_remotes_on(port, which_end) if salt.utils.platform.is_freebsd(): return _freebsd_remotes_on(port, which_end) if salt.utils.platform.is_netbsd(): return _netbsd_remotes_on(port, which_end) if salt.utils.platform.is_openbsd(): return _openbsd_remotes_on(port, which_end) if salt.utils.platform.is_windows(): return _windows_remotes_on(port, which_end) if salt.utils.platform.is_aix(): return _aix_remotes_on(port, which_end) return _linux_remotes_on(port, which_end) return ret def _parse_tcp_line(line): ''' Parse a single line from the contents of /proc/net/tcp or /proc/net/tcp6 ''' ret = {} comps = line.strip().split() sl = comps[0].rstrip(':') ret[sl] = {} l_addr, l_port = comps[1].split(':') r_addr, r_port = comps[2].split(':') ret[sl]['local_addr'] = hex2ip(l_addr, True) ret[sl]['local_port'] = int(l_port, 16) ret[sl]['remote_addr'] = hex2ip(r_addr, 
True) ret[sl]['remote_port'] = int(r_port, 16) ret[sl]['state'] = int(comps[3], 16) return ret def _netlink_tool_remote_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'ss' to get connections [root@salt-master ~]# ss -ant State Recv-Q Send-Q Local Address:Port Peer Address:Port LISTEN 0 511 *:80 *:* LISTEN 0 128 *:22 *:* ESTAB 0 0 127.0.0.1:56726 127.0.0.1:4505 ''' remotes = set() valid = False try: data = subprocess.check_output(['ss', '-ant']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed ss') raise except OSError: # not command "No such file or directory" return None lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'Address:Port' in line: # ss tools may not be valid valid = True continue elif 'ESTAB' not in line: continue chunks = line.split() local_host, local_port = chunks[3].split(':', 1) remote_host, remote_port = chunks[4].split(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) if valid is False: remotes = None return remotes def _sunos_remotes_on(port, which_end): ''' SunOS specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections [root@salt-master ~]# netstat -f inet -n TCP: IPv4 Local Address Remote Address Swind Send-Q Rwind Recv-Q State -------------------- -------------------- ----- ------ ----- ------ ----------- 10.0.0.101.4505 10.0.0.1.45329 1064800 0 1055864 0 ESTABLISHED 10.0.0.101.4505 10.0.0.100.50798 1064800 0 1055864 0 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[0].rsplit('.', 1) remote_host, remote_port = chunks[1].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _freebsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (FreeBSD) to get connections $ sudo sockstat -4 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp4 *:4505 *:* root python2.7 1445 17 tcp4 *:4506 *:* root python2.7 1294 14 tcp4 127.0.0.1:11813 127.0.0.1:4505 root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 $ sudo sockstat -4 -c -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp4 127.0.0.1:61115 127.0.0.1:4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp4', # '127.0.0.1:4505-', '127.0.0.1:55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue # sockstat -4 -c -p 4506 does this with high PIDs: # USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS # salt-master python2.781106 35 tcp4 192.168.12.34:4506 192.168.12.45:60143 local = chunks[-2] remote = chunks[-1] lhost, lport = local.split(':') rhost, rport = remote.split(':') if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _netbsd_remotes_on(port, which_end): ''' Returns set of ipv4 host addresses of remote established connections on local tcp port port. 
Parses output of shell 'sockstat' (NetBSD) to get connections $ sudo sockstat -4 -n USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1456 29 tcp *.4505 *.* root python2.7 1445 17 tcp *.4506 *.* root python2.7 1294 14 tcp 127.0.0.1.11813 127.0.0.1.4505 root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 $ sudo sockstat -4 -c -n -p 4506 USER COMMAND PID FD PROTO LOCAL ADDRESS FOREIGN ADDRESS root python2.7 1294 41 tcp 127.0.0.1.61115 127.0.0.1.4506 ''' port = int(port) remotes = set() try: cmd = salt.utils.args.shlex_split('sockstat -4 -c -n -p {0}'.format(port)) data = subprocess.check_output(cmd) # pylint: disable=minimum-python-version except subprocess.CalledProcessError as ex: log.error('Failed "sockstat" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['root', 'python2.7', '1456', '37', 'tcp', # '127.0.0.1.4505-', '127.0.0.1.55703'] # print chunks if 'COMMAND' in chunks[1]: continue # ignore header if len(chunks) < 2: continue local = chunks[5].split('.') lport = local.pop() lhost = '.'.join(local) remote = chunks[6].split('.') rport = remote.pop() rhost = '.'.join(remote) if which_end == 'local' and int(lport) != port: # ignore if local port not port continue if which_end == 'remote' and int(rport) != port: # ignore if remote port not port continue remotes.add(rhost) return remotes def _openbsd_remotes_on(port, which_end): ''' OpenBSD specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections $ netstat -nf inet Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp 0 0 10.0.0.101.4505 10.0.0.1.45329 ESTABLISHED tcp 0 0 10.0.0.101.4505 10.0.0.100.50798 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-nf', 'inet']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = data.split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _windows_remotes_on(port, which_end): r''' Windows specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. Parses output of shell 'netstat' to get connections C:\>netstat -n Active Connections Proto Local Address Foreign Address State TCP 10.2.33.17:3007 130.164.12.233:10123 ESTABLISHED TCP 10.2.33.17:3389 130.164.30.5:10378 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[1].rsplit(':', 1) remote_host, remote_port = chunks[2].rsplit(':', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes def _linux_remotes_on(port, which_end): ''' Linux specific helper function. 
Returns set of ip host addresses of remote established connections on local tcp port port. Parses output of shell 'lsof' to get connections $ sudo lsof -iTCP:4505 -n COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME Python 9971 root 35u IPv4 0x18a8464a29ca329d 0t0 TCP *:4505 (LISTEN) Python 9971 root 37u IPv4 0x18a8464a29b2b29d 0t0 TCP 127.0.0.1:4505->127.0.0.1:55703 (ESTABLISHED) Python 10152 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP 127.0.0.1:55703->127.0.0.1:4505 (ESTABLISHED) Python 10153 root 22u IPv4 0x18a8464a29c8cab5 0t0 TCP [fe80::249a]:4505->[fe80::150]:59367 (ESTABLISHED) ''' remotes = set() try: data = subprocess.check_output( ['lsof', '-iTCP:{0:d}'.format(port), '-n', '-P'] # pylint: disable=minimum-python-version ) except subprocess.CalledProcessError as ex: if ex.returncode == 1: # Lsof return 1 if any error was detected, including the failure # to locate Internet addresses, and it is not an error in this case. log.warning('"lsof" returncode = 1, likely no active TCP sessions.') return remotes log.error('Failed "lsof" with returncode = %s', ex.returncode) raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: chunks = line.split() if not chunks: continue # ['Python', '9971', 'root', '37u', 'IPv4', '0x18a8464a29b2b29d', '0t0', # 'TCP', '127.0.0.1:4505->127.0.0.1:55703', '(ESTABLISHED)'] # print chunks if 'COMMAND' in chunks[0]: continue # ignore header if 'ESTABLISHED' not in chunks[-1]: continue # ignore if not ESTABLISHED # '127.0.0.1:4505->127.0.0.1:55703' local, remote = chunks[8].split('->') _, lport = local.rsplit(':', 1) rhost, rport = remote.rsplit(':', 1) if which_end == 'remote_port' and int(rport) != port: continue if which_end == 'local_port' and int(lport) != port: continue remotes.add(rhost.strip("[]")) return remotes def _aix_remotes_on(port, which_end): ''' AIX specific helper function. Returns set of ipv4 host addresses of remote established connections on local or remote tcp port. 
Parses output of shell 'netstat' to get connections root@la68pp002_pub:/opt/salt/lib/python2.7/site-packages/salt/modules# netstat -f inet -n Active Internet connections Proto Recv-Q Send-Q Local Address Foreign Address (state) tcp4 0 0 172.29.149.95.50093 209.41.78.13.4505 ESTABLISHED tcp4 0 0 127.0.0.1.9514 *.* LISTEN tcp4 0 0 127.0.0.1.9515 *.* LISTEN tcp4 0 0 127.0.0.1.199 127.0.0.1.32779 ESTABLISHED tcp4 0 0 127.0.0.1.32779 127.0.0.1.199 ESTABLISHED tcp4 0 40 172.29.149.95.22 172.29.96.83.41022 ESTABLISHED tcp4 0 0 172.29.149.95.22 172.29.96.83.41032 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32775 ESTABLISHED tcp 0 0 127.0.0.1.32775 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32776 ESTABLISHED tcp 0 0 127.0.0.1.32776 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32777 ESTABLISHED tcp 0 0 127.0.0.1.32777 127.0.0.1.32771 ESTABLISHED tcp4 0 0 127.0.0.1.32771 127.0.0.1.32778 ESTABLISHED tcp 0 0 127.0.0.1.32778 127.0.0.1.32771 ESTABLISHED ''' remotes = set() try: data = subprocess.check_output(['netstat', '-f', 'inet', '-n']) # pylint: disable=minimum-python-version except subprocess.CalledProcessError: log.error('Failed netstat') raise lines = salt.utils.stringutils.to_str(data).split('\n') for line in lines: if 'ESTABLISHED' not in line: continue chunks = line.split() local_host, local_port = chunks[3].rsplit('.', 1) remote_host, remote_port = chunks[4].rsplit('.', 1) if which_end == 'remote_port' and int(remote_port) != port: continue if which_end == 'local_port' and int(local_port) != port: continue remotes.add(remote_host) return remotes @jinja_filter('gen_mac') def gen_mac(prefix='AC:DE:48'): ''' Generates a MAC address with the defined OUI prefix. 
Common prefixes: - ``00:16:3E`` -- Xen - ``00:18:51`` -- OpenVZ - ``00:50:56`` -- VMware (manually generated) - ``52:54:00`` -- QEMU/KVM - ``AC:DE:48`` -- PRIVATE References: - http://standards.ieee.org/develop/regauth/oui/oui.txt - https://www.wireshark.org/tools/oui-lookup.html - https://en.wikipedia.org/wiki/MAC_address ''' return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix, random.randint(0, 0xff), random.randint(0, 0xff), random.randint(0, 0xff)) @jinja_filter('mac_str_to_bytes') def mac_str_to_bytes(mac_str): ''' Convert a MAC address string into bytes. Works with or without separators: b1 = mac_str_to_bytes('08:00:27:13:69:77') b2 = mac_str_to_bytes('080027136977') assert b1 == b2 assert isinstance(b1, bytes) ''' if len(mac_str) == 12: pass elif len(mac_str) == 17: sep = mac_str[2] mac_str = mac_str.replace(sep, '') else: raise ValueError('Invalid MAC address') chars = (int(mac_str[s:s+2], 16) for s in range(0, 12, 2)) return bytes(chars) if six.PY3 else b''.join(chr(x) for x in chars) def refresh_dns(): ''' issue #21397: force glibc to re-read resolv.conf ''' try: res_init() except NameError: # Exception raised loading the library, thus res_init is not defined pass @jinja_filter('connection_check') def connection_check(addr, port=80, safe=False, ipv6=None): ''' Provides a convenient alias for the dns_check filter. ''' return dns_check(addr, port, safe, ipv6) @jinja_filter('dns_check') def dns_check(addr, port=80, safe=False, ipv6=None, attempt_connect=True): ''' Return the ip resolved by dns, but do not exit on failure, only raise an exception. Obeys system preference for IPv4/6 address resolution - this can be overridden by the ipv6 flag. Tries to connect to the address before considering it useful. If no address can be reached, the first one resolved is used as a fallback. 
''' error = False lookup = addr seen_ipv6 = False family = socket.AF_INET6 if ipv6 else socket.AF_INET if ipv6 is False else socket.AF_UNSPEC hostnames = [] try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, family, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True # If ipv6 is set to True, attempt another lookup using the IPv4 family, # just in case we're attempting to lookup an IPv4 IP # as an IPv6 hostname. if error and ipv6: try: refresh_dns() hostnames = socket.getaddrinfo(addr, port, socket.AF_INET, socket.SOCK_STREAM) except TypeError: err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True try: if not hostnames: error = True else: resolved = False candidates = [] for h in hostnames: # Input is IP address, passed through unchanged, just return it if h[4][0] == addr: resolved = salt.utils.zeromq.ip_bracket(addr) break candidate_addr = salt.utils.zeromq.ip_bracket(h[4][0]) # sometimes /etc/hosts contains ::1 localhost if not ipv6 and candidate_addr == '[::1]': continue candidates.append(candidate_addr) if attempt_connect: try: s = socket.socket(h[0], socket.SOCK_STREAM) s.settimeout(2) s.connect((candidate_addr.strip('[]'), h[4][1])) s.close() resolved = candidate_addr break except socket.error: pass if not resolved: if candidates: resolved = candidates[0] else: error = True except TypeError: err = ('Attempt to resolve address \'{0}\' failed. 
Invalid or unresolveable address').format(lookup) raise SaltSystemExit(code=42, msg=err) except socket.error: error = True if error: err = ('DNS lookup or connection check of \'{0}\' failed.').format(addr) if safe: if salt.log.is_console_configured(): # If logging is not configured it also means that either # the master or minion instance calling this hasn't even # started running log.error(err) raise SaltClientError() raise SaltSystemExit(code=42, msg=err) return resolved def parse_host_port(host_port): """ Takes a string argument specifying host or host:port. Returns a (hostname, port) or (ip_address, port) tuple. If no port is given, the second (port) element of the returned tuple will be None. host:port argument, for example, is accepted in the forms of: - hostname - hostname:1234 - hostname.domain.tld - hostname.domain.tld:5678 - [1234::5]:5678 - 1234::5 - 10.11.12.13:4567 - 10.11.12.13 """ host, port = None, None # default _s_ = host_port[:] if _s_[0] == "[": if "]" in host_port: host, _s_ = _s_.lstrip("[").rsplit("]", 1) host = ipaddress.IPv6Address(host).compressed if _s_[0] == ":": port = int(_s_.lstrip(":")) else: if len(_s_) > 1: raise ValueError('found ambiguous "{}" port in "{}"'.format(_s_, host_port)) else: if _s_.count(":") == 1: host, _hostport_separator_, port = _s_.partition(":") try: port = int(port) except ValueError as _e_: log.error('host_port "%s" port value "%s" is not an integer.', host_port, port) raise _e_ else: host = _s_ try: if not isinstance(host, ipaddress._BaseAddress): host_ip = ipaddress.ip_address(host).compressed host = host_ip except ValueError: log.debug('"%s" Not an IP address? Assuming it is a hostname.', host) if host != sanitize_host(host): log.error('bad hostname: "%s"', host) raise ValueError('bad hostname: "{}"'.format(host)) return host, port def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. 
:param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))