repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
saltstack/salt | salt/modules/publish.py | runner | def runner(fun, arg=None, timeout=5):
'''
Execute a runner on the master and return the data from the runner
function
CLI Example:
.. code-block:: bash
salt publish.runner manage.down
'''
arg = _parse_args(arg)
if 'master_uri' not in __opts__:
return 'No access to master. If using salt-call with --local, please remove.'
log.info('Publishing runner \'%s\' to %s', fun, __opts__['master_uri'])
auth = salt.crypt.SAuth(__opts__)
tok = auth.gen_token(b'salt')
load = {'cmd': 'minion_runner',
'fun': fun,
'arg': arg,
'tok': tok,
'tmo': timeout,
'id': __opts__['id'],
'no_parse': __opts__.get('no_parse', [])}
channel = salt.transport.client.ReqChannel.factory(__opts__)
try:
return channel.send(load)
except SaltReqTimeoutError:
return '\'{0}\' runner publish timed out'.format(fun)
finally:
channel.close() | python | def runner(fun, arg=None, timeout=5):
'''
Execute a runner on the master and return the data from the runner
function
CLI Example:
.. code-block:: bash
salt publish.runner manage.down
'''
arg = _parse_args(arg)
if 'master_uri' not in __opts__:
return 'No access to master. If using salt-call with --local, please remove.'
log.info('Publishing runner \'%s\' to %s', fun, __opts__['master_uri'])
auth = salt.crypt.SAuth(__opts__)
tok = auth.gen_token(b'salt')
load = {'cmd': 'minion_runner',
'fun': fun,
'arg': arg,
'tok': tok,
'tmo': timeout,
'id': __opts__['id'],
'no_parse': __opts__.get('no_parse', [])}
channel = salt.transport.client.ReqChannel.factory(__opts__)
try:
return channel.send(load)
except SaltReqTimeoutError:
return '\'{0}\' runner publish timed out'.format(fun)
finally:
channel.close() | [
"def",
"runner",
"(",
"fun",
",",
"arg",
"=",
"None",
",",
"timeout",
"=",
"5",
")",
":",
"arg",
"=",
"_parse_args",
"(",
"arg",
")",
"if",
"'master_uri'",
"not",
"in",
"__opts__",
":",
"return",
"'No access to master. If using salt-call with --local, please rem... | Execute a runner on the master and return the data from the runner
function
CLI Example:
.. code-block:: bash
salt publish.runner manage.down | [
"Execute",
"a",
"runner",
"on",
"the",
"master",
"and",
"return",
"the",
"data",
"from",
"the",
"runner",
"function"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/publish.py#L303-L335 | train |
saltstack/salt | salt/modules/kerberos.py | __execute_kadmin | def __execute_kadmin(cmd):
'''
Execute kadmin commands
'''
ret = {}
auth_keytab = __opts__.get('auth_keytab', None)
auth_principal = __opts__.get('auth_principal', None)
if __salt__['file.file_exists'](auth_keytab) and auth_principal:
return __salt__['cmd.run_all'](
'kadmin -k -t {0} -p {1} -q "{2}"'.format(
auth_keytab, auth_principal, cmd
)
)
else:
log.error('Unable to find kerberos keytab/principal')
ret['retcode'] = 1
ret['comment'] = 'Missing authentication keytab/principal'
return ret | python | def __execute_kadmin(cmd):
'''
Execute kadmin commands
'''
ret = {}
auth_keytab = __opts__.get('auth_keytab', None)
auth_principal = __opts__.get('auth_principal', None)
if __salt__['file.file_exists'](auth_keytab) and auth_principal:
return __salt__['cmd.run_all'](
'kadmin -k -t {0} -p {1} -q "{2}"'.format(
auth_keytab, auth_principal, cmd
)
)
else:
log.error('Unable to find kerberos keytab/principal')
ret['retcode'] = 1
ret['comment'] = 'Missing authentication keytab/principal'
return ret | [
"def",
"__execute_kadmin",
"(",
"cmd",
")",
":",
"ret",
"=",
"{",
"}",
"auth_keytab",
"=",
"__opts__",
".",
"get",
"(",
"'auth_keytab'",
",",
"None",
")",
"auth_principal",
"=",
"__opts__",
".",
"get",
"(",
"'auth_principal'",
",",
"None",
")",
"if",
"__... | Execute kadmin commands | [
"Execute",
"kadmin",
"commands"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L40-L60 | train |
saltstack/salt | salt/modules/kerberos.py | list_principals | def list_principals():
'''
Get all principals
CLI Example:
.. code-block:: bash
salt 'kde.example.com' kerberos.list_principals
'''
ret = {}
cmd = __execute_kadmin('list_principals')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
ret = {'principals': []}
for i in cmd['stdout'].splitlines()[1:]:
ret['principals'].append(i)
return ret | python | def list_principals():
'''
Get all principals
CLI Example:
.. code-block:: bash
salt 'kde.example.com' kerberos.list_principals
'''
ret = {}
cmd = __execute_kadmin('list_principals')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
ret = {'principals': []}
for i in cmd['stdout'].splitlines()[1:]:
ret['principals'].append(i)
return ret | [
"def",
"list_principals",
"(",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"__execute_kadmin",
"(",
"'list_principals'",
")",
"if",
"cmd",
"[",
"'retcode'",
"]",
"!=",
"0",
"or",
"cmd",
"[",
"'stderr'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"c... | Get all principals
CLI Example:
.. code-block:: bash
salt 'kde.example.com' kerberos.list_principals | [
"Get",
"all",
"principals"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L63-L88 | train |
saltstack/salt | salt/modules/kerberos.py | get_principal | def get_principal(name):
'''
Get princial details
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.get_principal root/admin
'''
ret = {}
cmd = __execute_kadmin('get_principal {0}'.format(name))
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
for i in cmd['stdout'].splitlines()[1:]:
(prop, val) = i.split(':', 1)
ret[prop] = val
return ret | python | def get_principal(name):
'''
Get princial details
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.get_principal root/admin
'''
ret = {}
cmd = __execute_kadmin('get_principal {0}'.format(name))
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
for i in cmd['stdout'].splitlines()[1:]:
(prop, val) = i.split(':', 1)
ret[prop] = val
return ret | [
"def",
"get_principal",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"__execute_kadmin",
"(",
"'get_principal {0}'",
".",
"format",
"(",
"name",
")",
")",
"if",
"cmd",
"[",
"'retcode'",
"]",
"!=",
"0",
"or",
"cmd",
"[",
"'stderr'",
"]",
"... | Get princial details
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.get_principal root/admin | [
"Get",
"princial",
"details"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L91-L116 | train |
saltstack/salt | salt/modules/kerberos.py | list_policies | def list_policies():
'''
List policies
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.list_policies
'''
ret = {}
cmd = __execute_kadmin('list_policies')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
ret = {'policies': []}
for i in cmd['stdout'].splitlines()[1:]:
ret['policies'].append(i)
return ret | python | def list_policies():
'''
List policies
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.list_policies
'''
ret = {}
cmd = __execute_kadmin('list_policies')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
ret = {'policies': []}
for i in cmd['stdout'].splitlines()[1:]:
ret['policies'].append(i)
return ret | [
"def",
"list_policies",
"(",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"__execute_kadmin",
"(",
"'list_policies'",
")",
"if",
"cmd",
"[",
"'retcode'",
"]",
"!=",
"0",
"or",
"cmd",
"[",
"'stderr'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"cmd",... | List policies
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.list_policies | [
"List",
"policies"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L119-L144 | train |
saltstack/salt | salt/modules/kerberos.py | get_privs | def get_privs():
'''
Current privileges
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.get_privs
'''
ret = {}
cmd = __execute_kadmin('get_privs')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
for i in cmd['stdout'].splitlines()[1:]:
(prop, val) = i.split(':', 1)
ret[prop] = [j for j in val.split()]
return ret | python | def get_privs():
'''
Current privileges
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.get_privs
'''
ret = {}
cmd = __execute_kadmin('get_privs')
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
for i in cmd['stdout'].splitlines()[1:]:
(prop, val) = i.split(':', 1)
ret[prop] = [j for j in val.split()]
return ret | [
"def",
"get_privs",
"(",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"__execute_kadmin",
"(",
"'get_privs'",
")",
"if",
"cmd",
"[",
"'retcode'",
"]",
"!=",
"0",
"or",
"cmd",
"[",
"'stderr'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"cmd",
"[",
... | Current privileges
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.get_privs | [
"Current",
"privileges"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L175-L200 | train |
saltstack/salt | salt/modules/kerberos.py | create_principal | def create_principal(name, enctypes=None):
'''
Create Principal
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.create_principal host/example.com
'''
ret = {}
krb_cmd = 'addprinc -randkey'
if enctypes:
krb_cmd += ' -e {0}'.format(enctypes)
krb_cmd += ' {0}'.format(name)
cmd = __execute_kadmin(krb_cmd)
if cmd['retcode'] != 0 or cmd['stderr']:
if not cmd['stderr'].splitlines()[-1].startswith('WARNING:'):
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
return True | python | def create_principal(name, enctypes=None):
'''
Create Principal
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.create_principal host/example.com
'''
ret = {}
krb_cmd = 'addprinc -randkey'
if enctypes:
krb_cmd += ' -e {0}'.format(enctypes)
krb_cmd += ' {0}'.format(name)
cmd = __execute_kadmin(krb_cmd)
if cmd['retcode'] != 0 or cmd['stderr']:
if not cmd['stderr'].splitlines()[-1].startswith('WARNING:'):
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
return True | [
"def",
"create_principal",
"(",
"name",
",",
"enctypes",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"krb_cmd",
"=",
"'addprinc -randkey'",
"if",
"enctypes",
":",
"krb_cmd",
"+=",
"' -e {0}'",
".",
"format",
"(",
"enctypes",
")",
"krb_cmd",
"+=",
"' {0}'"... | Create Principal
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.create_principal host/example.com | [
"Create",
"Principal"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L203-L231 | train |
saltstack/salt | salt/modules/kerberos.py | delete_principal | def delete_principal(name):
'''
Delete Principal
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.delete_principal host/example.com@EXAMPLE.COM
'''
ret = {}
cmd = __execute_kadmin('delprinc -force {0}'.format(name))
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
return True | python | def delete_principal(name):
'''
Delete Principal
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.delete_principal host/example.com@EXAMPLE.COM
'''
ret = {}
cmd = __execute_kadmin('delprinc -force {0}'.format(name))
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
return True | [
"def",
"delete_principal",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"}",
"cmd",
"=",
"__execute_kadmin",
"(",
"'delprinc -force {0}'",
".",
"format",
"(",
"name",
")",
")",
"if",
"cmd",
"[",
"'retcode'",
"]",
"!=",
"0",
"or",
"cmd",
"[",
"'stderr'",
"]"... | Delete Principal
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.delete_principal host/example.com@EXAMPLE.COM | [
"Delete",
"Principal"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L234-L254 | train |
saltstack/salt | salt/modules/kerberos.py | create_keytab | def create_keytab(name, keytab, enctypes=None):
'''
Create keytab
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab
'''
ret = {}
krb_cmd = 'ktadd -k {0}'.format(keytab)
if enctypes:
krb_cmd += ' -e {0}'.format(enctypes)
krb_cmd += ' {0}'.format(name)
cmd = __execute_kadmin(krb_cmd)
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
return True | python | def create_keytab(name, keytab, enctypes=None):
'''
Create keytab
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab
'''
ret = {}
krb_cmd = 'ktadd -k {0}'.format(keytab)
if enctypes:
krb_cmd += ' -e {0}'.format(enctypes)
krb_cmd += ' {0}'.format(name)
cmd = __execute_kadmin(krb_cmd)
if cmd['retcode'] != 0 or cmd['stderr']:
ret['comment'] = cmd['stderr'].splitlines()[-1]
ret['result'] = False
return ret
return True | [
"def",
"create_keytab",
"(",
"name",
",",
"keytab",
",",
"enctypes",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"krb_cmd",
"=",
"'ktadd -k {0}'",
".",
"format",
"(",
"keytab",
")",
"if",
"enctypes",
":",
"krb_cmd",
"+=",
"' -e {0}'",
".",
"format",
"... | Create keytab
CLI Example:
.. code-block:: bash
salt 'kdc.example.com' kerberos.create_keytab host/host1.example.com host1.example.com.keytab | [
"Create",
"keytab"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kerberos.py#L257-L284 | train |
saltstack/salt | salt/states/host.py | present | def present(name, ip, clean=False): # pylint: disable=C0103
'''
Ensures that the named host is present with the given ip
name
The host to assign an ip to
ip
The ip addr(s) to apply to the host. Can be a single IP or a list of IP
addresses.
clean : False
Remove any entries which don't match those configured in the ``ip``
option.
.. versionadded:: 2018.3.4
'''
ret = {'name': name,
'changes': {},
'result': None if __opts__['test'] else True,
'comment': ''}
if not isinstance(ip, list):
ip = [ip]
all_hosts = __salt__['hosts.list_hosts']()
comments = []
to_add = set()
to_remove = set()
# First check for IPs not currently in the hosts file
to_add.update([(addr, name) for addr in ip if addr not in all_hosts])
# Now sweep through the hosts file and look for entries matching either the
# IP address(es) or hostname.
for addr, aliases in six.iteritems(all_hosts):
if addr not in ip:
if name in aliases:
# Found match for hostname, but the corresponding IP is not in
# our list, so we need to remove it.
if clean:
to_remove.add((addr, name))
else:
ret.setdefault('warnings', []).append(
'Host {0} present for IP address {1}. To get rid of '
'this warning, either run this state with \'clean\' '
'set to True to remove {0} from {1}, or add {1} to '
'the \'ip\' argument.'.format(name, addr)
)
else:
if name in aliases:
# No changes needed for this IP address and hostname
comments.append(
'Host {0} ({1}) already present'.format(name, addr)
)
else:
# IP address listed in hosts file, but hostname is not present.
# We will need to add it.
if salt.utils.validate.net.ip_addr(addr):
to_add.add((addr, name))
else:
ret['result'] = False
comments.append(
'Invalid IP Address for {0} ({1})'.format(name, addr)
)
for addr, name in to_add:
if __opts__['test']:
comments.append(
'Host {0} ({1}) would be added'.format(name, addr)
)
else:
if __salt__['hosts.add_host'](addr, name):
comments.append('Added host {0} ({1})'.format(name, addr))
else:
ret['result'] = False
comments.append('Failed to add host {0} ({1})'.format(name, addr))
continue
ret['changes'].setdefault('added', {}).setdefault(addr, []).append(name)
for addr, name in to_remove:
if __opts__['test']:
comments.append(
'Host {0} ({1}) would be removed'.format(name, addr)
)
else:
if __salt__['hosts.rm_host'](addr, name):
comments.append('Removed host {0} ({1})'.format(name, addr))
else:
ret['result'] = False
comments.append('Failed to remove host {0} ({1})'.format(name, addr))
continue
ret['changes'].setdefault('removed', {}).setdefault(addr, []).append(name)
ret['comment'] = '\n'.join(comments)
return ret | python | def present(name, ip, clean=False): # pylint: disable=C0103
'''
Ensures that the named host is present with the given ip
name
The host to assign an ip to
ip
The ip addr(s) to apply to the host. Can be a single IP or a list of IP
addresses.
clean : False
Remove any entries which don't match those configured in the ``ip``
option.
.. versionadded:: 2018.3.4
'''
ret = {'name': name,
'changes': {},
'result': None if __opts__['test'] else True,
'comment': ''}
if not isinstance(ip, list):
ip = [ip]
all_hosts = __salt__['hosts.list_hosts']()
comments = []
to_add = set()
to_remove = set()
# First check for IPs not currently in the hosts file
to_add.update([(addr, name) for addr in ip if addr not in all_hosts])
# Now sweep through the hosts file and look for entries matching either the
# IP address(es) or hostname.
for addr, aliases in six.iteritems(all_hosts):
if addr not in ip:
if name in aliases:
# Found match for hostname, but the corresponding IP is not in
# our list, so we need to remove it.
if clean:
to_remove.add((addr, name))
else:
ret.setdefault('warnings', []).append(
'Host {0} present for IP address {1}. To get rid of '
'this warning, either run this state with \'clean\' '
'set to True to remove {0} from {1}, or add {1} to '
'the \'ip\' argument.'.format(name, addr)
)
else:
if name in aliases:
# No changes needed for this IP address and hostname
comments.append(
'Host {0} ({1}) already present'.format(name, addr)
)
else:
# IP address listed in hosts file, but hostname is not present.
# We will need to add it.
if salt.utils.validate.net.ip_addr(addr):
to_add.add((addr, name))
else:
ret['result'] = False
comments.append(
'Invalid IP Address for {0} ({1})'.format(name, addr)
)
for addr, name in to_add:
if __opts__['test']:
comments.append(
'Host {0} ({1}) would be added'.format(name, addr)
)
else:
if __salt__['hosts.add_host'](addr, name):
comments.append('Added host {0} ({1})'.format(name, addr))
else:
ret['result'] = False
comments.append('Failed to add host {0} ({1})'.format(name, addr))
continue
ret['changes'].setdefault('added', {}).setdefault(addr, []).append(name)
for addr, name in to_remove:
if __opts__['test']:
comments.append(
'Host {0} ({1}) would be removed'.format(name, addr)
)
else:
if __salt__['hosts.rm_host'](addr, name):
comments.append('Removed host {0} ({1})'.format(name, addr))
else:
ret['result'] = False
comments.append('Failed to remove host {0} ({1})'.format(name, addr))
continue
ret['changes'].setdefault('removed', {}).setdefault(addr, []).append(name)
ret['comment'] = '\n'.join(comments)
return ret | [
"def",
"present",
"(",
"name",
",",
"ip",
",",
"clean",
"=",
"False",
")",
":",
"# pylint: disable=C0103",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
"if",
"__opts__",
"[",
"'test'",
"]",
"e... | Ensures that the named host is present with the given ip
name
The host to assign an ip to
ip
The ip addr(s) to apply to the host. Can be a single IP or a list of IP
addresses.
clean : False
Remove any entries which don't match those configured in the ``ip``
option.
.. versionadded:: 2018.3.4 | [
"Ensures",
"that",
"the",
"named",
"host",
"is",
"present",
"with",
"the",
"given",
"ip"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/host.py#L70-L165 | train |
saltstack/salt | salt/states/host.py | absent | def absent(name, ip): # pylint: disable=C0103
'''
Ensure that the named host is absent
name
The host to remove
ip
The ip addr(s) of the host to remove
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if not isinstance(ip, list):
ip = [ip]
comments = []
for _ip in ip:
if not __salt__['hosts.has_pair'](_ip, name):
ret['result'] = True
comments.append('Host {0} ({1}) already absent'.format(name, _ip))
else:
if __opts__['test']:
comments.append('Host {0} ({1}) needs to be removed'.format(name, _ip))
else:
if __salt__['hosts.rm_host'](_ip, name):
ret['changes'] = {'host': name}
ret['result'] = True
comments.append('Removed host {0} ({1})'.format(name, _ip))
else:
ret['result'] = False
comments.append('Failed to remove host')
ret['comment'] = '\n'.join(comments)
return ret | python | def absent(name, ip): # pylint: disable=C0103
'''
Ensure that the named host is absent
name
The host to remove
ip
The ip addr(s) of the host to remove
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if not isinstance(ip, list):
ip = [ip]
comments = []
for _ip in ip:
if not __salt__['hosts.has_pair'](_ip, name):
ret['result'] = True
comments.append('Host {0} ({1}) already absent'.format(name, _ip))
else:
if __opts__['test']:
comments.append('Host {0} ({1}) needs to be removed'.format(name, _ip))
else:
if __salt__['hosts.rm_host'](_ip, name):
ret['changes'] = {'host': name}
ret['result'] = True
comments.append('Removed host {0} ({1})'.format(name, _ip))
else:
ret['result'] = False
comments.append('Failed to remove host')
ret['comment'] = '\n'.join(comments)
return ret | [
"def",
"absent",
"(",
"name",
",",
"ip",
")",
":",
"# pylint: disable=C0103",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"if",
"not",
"isinstance",
"(",
"ip... | Ensure that the named host is absent
name
The host to remove
ip
The ip addr(s) of the host to remove | [
"Ensure",
"that",
"the",
"named",
"host",
"is",
"absent"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/host.py#L168-L203 | train |
saltstack/salt | salt/states/host.py | only | def only(name, hostnames):
'''
Ensure that only the given hostnames are associated with the
given IP address.
.. versionadded:: 2016.3.0
name
The IP address to associate with the given hostnames.
hostnames
Either a single hostname or a list of hostnames to associate
with the given IP address in the given order. Any other
hostname associated with the IP address is removed. If no
hostnames are specified, all hostnames associated with the
given IP address are removed.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if isinstance(hostnames, six.string_types):
hostnames = [hostnames]
old = ' '.join(__salt__['hosts.get_alias'](name))
new = ' '.join((x.strip() for x in hostnames))
if old == new:
ret['comment'] = 'IP address {0} already set to "{1}"'.format(
name, new)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = 'Would change {0} from "{1}" to "{2}"'.format(
name, old, new)
return ret
ret['result'] = __salt__['hosts.set_host'](name, new)
if not ret['result']:
ret['comment'] = ('hosts.set_host failed to change {0}'
+ ' from "{1}" to "{2}"').format(name, old, new)
return ret
ret['comment'] = 'successfully changed {0} from "{1}" to "{2}"'.format(
name, old, new)
ret['changes'] = {name: {'old': old, 'new': new}}
return ret | python | def only(name, hostnames):
'''
Ensure that only the given hostnames are associated with the
given IP address.
.. versionadded:: 2016.3.0
name
The IP address to associate with the given hostnames.
hostnames
Either a single hostname or a list of hostnames to associate
with the given IP address in the given order. Any other
hostname associated with the IP address is removed. If no
hostnames are specified, all hostnames associated with the
given IP address are removed.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if isinstance(hostnames, six.string_types):
hostnames = [hostnames]
old = ' '.join(__salt__['hosts.get_alias'](name))
new = ' '.join((x.strip() for x in hostnames))
if old == new:
ret['comment'] = 'IP address {0} already set to "{1}"'.format(
name, new)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = 'Would change {0} from "{1}" to "{2}"'.format(
name, old, new)
return ret
ret['result'] = __salt__['hosts.set_host'](name, new)
if not ret['result']:
ret['comment'] = ('hosts.set_host failed to change {0}'
+ ' from "{1}" to "{2}"').format(name, old, new)
return ret
ret['comment'] = 'successfully changed {0} from "{1}" to "{2}"'.format(
name, old, new)
ret['changes'] = {name: {'old': old, 'new': new}}
return ret | [
"def",
"only",
"(",
"name",
",",
"hostnames",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"if",
"isinstance",
"(",
"hostnames",
",",
"six",
".",
... | Ensure that only the given hostnames are associated with the
given IP address.
.. versionadded:: 2016.3.0
name
The IP address to associate with the given hostnames.
hostnames
Either a single hostname or a list of hostnames to associate
with the given IP address in the given order. Any other
hostname associated with the IP address is removed. If no
hostnames are specified, all hostnames associated with the
given IP address are removed. | [
"Ensure",
"that",
"only",
"the",
"given",
"hostnames",
"are",
"associated",
"with",
"the",
"given",
"IP",
"address",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/host.py#L206-L254 | train |
saltstack/salt | salt/fileserver/svnfs.py | _rev | def _rev(repo):
'''
Returns revision ID of repo
'''
try:
repo_info = dict(six.iteritems(CLIENT.info(repo['repo'])))
except (pysvn._pysvn.ClientError, TypeError,
KeyError, AttributeError) as exc:
log.error(
'Error retrieving revision ID for svnfs remote %s '
'(cachedir: %s): %s',
repo['url'], repo['repo'], exc
)
else:
return repo_info['revision'].number
return None | python | def _rev(repo):
'''
Returns revision ID of repo
'''
try:
repo_info = dict(six.iteritems(CLIENT.info(repo['repo'])))
except (pysvn._pysvn.ClientError, TypeError,
KeyError, AttributeError) as exc:
log.error(
'Error retrieving revision ID for svnfs remote %s '
'(cachedir: %s): %s',
repo['url'], repo['repo'], exc
)
else:
return repo_info['revision'].number
return None | [
"def",
"_rev",
"(",
"repo",
")",
":",
"try",
":",
"repo_info",
"=",
"dict",
"(",
"six",
".",
"iteritems",
"(",
"CLIENT",
".",
"info",
"(",
"repo",
"[",
"'repo'",
"]",
")",
")",
")",
"except",
"(",
"pysvn",
".",
"_pysvn",
".",
"ClientError",
",",
... | Returns revision ID of repo | [
"Returns",
"revision",
"ID",
"of",
"repo"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L103-L118 | train |
saltstack/salt | salt/fileserver/svnfs.py | init | def init():
'''
Return the list of svn remotes and their configuration information
'''
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = \
six.text_type(__opts__['svnfs_{0}'.format(param)])
for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict(
[(key, six.text_type(val)) for key, val in
six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote %s. If no '
'per-remote parameters are being specified, there may be '
'a trailing colon after the URL, which should be removed. '
'Check the master configuration file.', repo_url
)
_failhard()
per_remote_errors = False
for param in (x for x in per_remote_conf
if x not in PER_REMOTE_OVERRIDES):
log.error(
'Invalid configuration parameter \'%s\' for remote %s. '
'Valid parameters are: %s. See the documentation for '
'further information.',
param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
)
per_remote_errors = True
if per_remote_errors:
_failhard()
repo_conf.update(per_remote_conf)
else:
repo_url = remote
if not isinstance(repo_url, six.string_types):
log.error(
'Invalid svnfs remote %s. Remotes must be strings, you may '
'need to enclose the URL in quotes', repo_url
)
_failhard()
try:
repo_conf['mountpoint'] = salt.utils.url.strip_proto(
repo_conf['mountpoint']
)
except TypeError:
# mountpoint not specified
pass
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty.
try:
CLIENT.checkout(repo_url, rp_)
repos.append(rp_)
new_remote = True
except pysvn._pysvn.ClientError as exc:
log.error(
'Failed to initialize svnfs remote \'%s\': %s',
repo_url, exc
)
_failhard()
else:
# Confirm that there is an svn checkout at the necessary path by
# running pysvn.Client().status()
try:
CLIENT.status(rp_)
except pysvn._pysvn.ClientError as exc:
log.error(
'Cache path %s (corresponding remote: %s) exists but is '
'not a valid subversion checkout. You will need to '
'manually delete this directory on the master to continue '
'to use this svnfs remote.', rp_, repo_url
)
_failhard()
repo_conf.update({
'repo': rp_,
'url': repo_url,
'hash': repo_hash,
'cachedir': rp_,
'lockfile': os.path.join(rp_, 'update.lk')
})
repos.append(repo_conf)
if new_remote:
remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
for repo_conf in repos:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo_conf['hash'], repo_conf['url']
)
)
)
except OSError:
pass
else:
log.info('Wrote new svnfs_remote map to %s', remote_map)
return repos | python | def init():
'''
Return the list of svn remotes and their configuration information
'''
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = \
six.text_type(__opts__['svnfs_{0}'.format(param)])
for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict(
[(key, six.text_type(val)) for key, val in
six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote %s. If no '
'per-remote parameters are being specified, there may be '
'a trailing colon after the URL, which should be removed. '
'Check the master configuration file.', repo_url
)
_failhard()
per_remote_errors = False
for param in (x for x in per_remote_conf
if x not in PER_REMOTE_OVERRIDES):
log.error(
'Invalid configuration parameter \'%s\' for remote %s. '
'Valid parameters are: %s. See the documentation for '
'further information.',
param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
)
per_remote_errors = True
if per_remote_errors:
_failhard()
repo_conf.update(per_remote_conf)
else:
repo_url = remote
if not isinstance(repo_url, six.string_types):
log.error(
'Invalid svnfs remote %s. Remotes must be strings, you may '
'need to enclose the URL in quotes', repo_url
)
_failhard()
try:
repo_conf['mountpoint'] = salt.utils.url.strip_proto(
repo_conf['mountpoint']
)
except TypeError:
# mountpoint not specified
pass
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty.
try:
CLIENT.checkout(repo_url, rp_)
repos.append(rp_)
new_remote = True
except pysvn._pysvn.ClientError as exc:
log.error(
'Failed to initialize svnfs remote \'%s\': %s',
repo_url, exc
)
_failhard()
else:
# Confirm that there is an svn checkout at the necessary path by
# running pysvn.Client().status()
try:
CLIENT.status(rp_)
except pysvn._pysvn.ClientError as exc:
log.error(
'Cache path %s (corresponding remote: %s) exists but is '
'not a valid subversion checkout. You will need to '
'manually delete this directory on the master to continue '
'to use this svnfs remote.', rp_, repo_url
)
_failhard()
repo_conf.update({
'repo': rp_,
'url': repo_url,
'hash': repo_hash,
'cachedir': rp_,
'lockfile': os.path.join(rp_, 'update.lk')
})
repos.append(repo_conf)
if new_remote:
remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
for repo_conf in repos:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo_conf['hash'], repo_conf['url']
)
)
)
except OSError:
pass
else:
log.info('Wrote new svnfs_remote map to %s', remote_map)
return repos | [
"def",
"init",
"(",
")",
":",
"bp_",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'svnfs'",
")",
"new_remote",
"=",
"False",
"repos",
"=",
"[",
"]",
"per_remote_defaults",
"=",
"{",
"}",
"for",
"param",
"in",
... | Return the list of svn remotes and their configuration information | [
"Return",
"the",
"list",
"of",
"svn",
"remotes",
"and",
"their",
"configuration",
"information"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L130-L252 | train |
saltstack/salt | salt/fileserver/svnfs.py | _clear_old_remotes | def _clear_old_remotes():
'''
Remove cache directories for remotes no longer configured
'''
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
try:
cachedir_ls = os.listdir(bp_)
except OSError:
cachedir_ls = []
repos = init()
# Remove actively-used remotes from list
for repo in repos:
try:
cachedir_ls.remove(repo['hash'])
except ValueError:
pass
to_remove = []
for item in cachedir_ls:
if item in ('hash', 'refs'):
continue
path = os.path.join(bp_, item)
if os.path.isdir(path):
to_remove.append(path)
failed = []
if to_remove:
for rdir in to_remove:
try:
shutil.rmtree(rdir)
except OSError as exc:
log.error(
'Unable to remove old svnfs remote cachedir %s: %s',
rdir, exc
)
failed.append(rdir)
else:
log.debug('svnfs removed old cachedir %s', rdir)
for fdir in failed:
to_remove.remove(fdir)
return bool(to_remove), repos | python | def _clear_old_remotes():
'''
Remove cache directories for remotes no longer configured
'''
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
try:
cachedir_ls = os.listdir(bp_)
except OSError:
cachedir_ls = []
repos = init()
# Remove actively-used remotes from list
for repo in repos:
try:
cachedir_ls.remove(repo['hash'])
except ValueError:
pass
to_remove = []
for item in cachedir_ls:
if item in ('hash', 'refs'):
continue
path = os.path.join(bp_, item)
if os.path.isdir(path):
to_remove.append(path)
failed = []
if to_remove:
for rdir in to_remove:
try:
shutil.rmtree(rdir)
except OSError as exc:
log.error(
'Unable to remove old svnfs remote cachedir %s: %s',
rdir, exc
)
failed.append(rdir)
else:
log.debug('svnfs removed old cachedir %s', rdir)
for fdir in failed:
to_remove.remove(fdir)
return bool(to_remove), repos | [
"def",
"_clear_old_remotes",
"(",
")",
":",
"bp_",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'svnfs'",
")",
"try",
":",
"cachedir_ls",
"=",
"os",
".",
"listdir",
"(",
"bp_",
")",
"except",
"OSError",
":",
"ca... | Remove cache directories for remotes no longer configured | [
"Remove",
"cache",
"directories",
"for",
"remotes",
"no",
"longer",
"configured"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L255-L293 | train |
saltstack/salt | salt/fileserver/svnfs.py | clear_cache | def clear_cache():
'''
Completely clear svnfs cache
'''
fsb_cachedir = os.path.join(__opts__['cachedir'], 'svnfs')
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
errors = []
for rdir in (fsb_cachedir, list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
errors.append('Unable to delete {0}: {1}'.format(rdir, exc))
return errors | python | def clear_cache():
'''
Completely clear svnfs cache
'''
fsb_cachedir = os.path.join(__opts__['cachedir'], 'svnfs')
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
errors = []
for rdir in (fsb_cachedir, list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
errors.append('Unable to delete {0}: {1}'.format(rdir, exc))
return errors | [
"def",
"clear_cache",
"(",
")",
":",
"fsb_cachedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'svnfs'",
")",
"list_cachedir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
... | Completely clear svnfs cache | [
"Completely",
"clear",
"svnfs",
"cache"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L296-L309 | train |
saltstack/salt | salt/fileserver/svnfs.py | clear_lock | def clear_lock(remote=None):
'''
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
'''
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
msg = ('Unable to remove update lock for {0} ({1}): {2} '
.format(repo['url'], repo['lockfile'], exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
if os.path.exists(repo['lockfile']):
try:
os.remove(repo['lockfile'])
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(repo['lockfile'])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
msg = 'Removed lock for {0}'.format(repo['url'])
log.debug(msg)
success.append(msg)
return success, failed
if isinstance(remote, dict):
return _do_clear_lock(remote)
cleared = []
errors = []
for repo in init():
if remote:
try:
if remote not in repo['url']:
continue
except TypeError:
# remote was non-string, try again
if six.text_type(remote) not in repo['url']:
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | python | def clear_lock(remote=None):
'''
Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked.
'''
def _do_clear_lock(repo):
def _add_error(errlist, repo, exc):
msg = ('Unable to remove update lock for {0} ({1}): {2} '
.format(repo['url'], repo['lockfile'], exc))
log.debug(msg)
errlist.append(msg)
success = []
failed = []
if os.path.exists(repo['lockfile']):
try:
os.remove(repo['lockfile'])
except OSError as exc:
if exc.errno == errno.EISDIR:
# Somehow this path is a directory. Should never happen
# unless some wiseguy manually creates a directory at this
# path, but just in case, handle it.
try:
shutil.rmtree(repo['lockfile'])
except OSError as exc:
_add_error(failed, repo, exc)
else:
_add_error(failed, repo, exc)
else:
msg = 'Removed lock for {0}'.format(repo['url'])
log.debug(msg)
success.append(msg)
return success, failed
if isinstance(remote, dict):
return _do_clear_lock(remote)
cleared = []
errors = []
for repo in init():
if remote:
try:
if remote not in repo['url']:
continue
except TypeError:
# remote was non-string, try again
if six.text_type(remote) not in repo['url']:
continue
success, failed = _do_clear_lock(repo)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | [
"def",
"clear_lock",
"(",
"remote",
"=",
"None",
")",
":",
"def",
"_do_clear_lock",
"(",
"repo",
")",
":",
"def",
"_add_error",
"(",
"errlist",
",",
"repo",
",",
"exc",
")",
":",
"msg",
"=",
"(",
"'Unable to remove update lock for {0} ({1}): {2} '",
".",
"fo... | Clear update.lk
``remote`` can either be a dictionary containing repo configuration
information, or a pattern. If the latter, then remotes for which the URL
matches the pattern will be locked. | [
"Clear",
"update",
".",
"lk"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L312-L365 | train |
saltstack/salt | salt/fileserver/svnfs.py | update | def update():
'''
Execute an svn update on all of the repos
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'svnfs'}
# _clear_old_remotes runs init(), so use the value from there to avoid a
# second init()
data['changed'], repos = _clear_old_remotes()
for repo in repos:
if os.path.exists(repo['lockfile']):
log.warning(
'Update lockfile is present for svnfs remote %s, skipping. '
'If this warning persists, it is possible that the update '
'process was interrupted. Removing %s or running '
'\'salt-run fileserver.clear_lock svnfs\' will allow updates '
'to continue for this remote.', repo['url'], repo['lockfile']
)
continue
_, errors = lock(repo)
if errors:
log.error(
'Unable to set update lock for svnfs remote %s, skipping.',
repo['url']
)
continue
log.debug('svnfs is fetching from %s', repo['url'])
old_rev = _rev(repo)
try:
CLIENT.update(repo['repo'])
except pysvn._pysvn.ClientError as exc:
log.error(
'Error updating svnfs remote %s (cachedir: %s): %s',
repo['url'], repo['cachedir'], exc
)
new_rev = _rev(repo)
if any((x is None for x in (old_rev, new_rev))):
# There were problems getting the revision ID
continue
if new_rev != old_rev:
data['changed'] = True
clear_lock(repo)
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
if data.get('changed', False) is True or not os.path.isfile(env_cache):
env_cachedir = os.path.dirname(env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to %s', env_cache)
# if there is a change, fire an event
if __opts__.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=False)
event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'svnfs/hash'),
find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass | python | def update():
'''
Execute an svn update on all of the repos
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'svnfs'}
# _clear_old_remotes runs init(), so use the value from there to avoid a
# second init()
data['changed'], repos = _clear_old_remotes()
for repo in repos:
if os.path.exists(repo['lockfile']):
log.warning(
'Update lockfile is present for svnfs remote %s, skipping. '
'If this warning persists, it is possible that the update '
'process was interrupted. Removing %s or running '
'\'salt-run fileserver.clear_lock svnfs\' will allow updates '
'to continue for this remote.', repo['url'], repo['lockfile']
)
continue
_, errors = lock(repo)
if errors:
log.error(
'Unable to set update lock for svnfs remote %s, skipping.',
repo['url']
)
continue
log.debug('svnfs is fetching from %s', repo['url'])
old_rev = _rev(repo)
try:
CLIENT.update(repo['repo'])
except pysvn._pysvn.ClientError as exc:
log.error(
'Error updating svnfs remote %s (cachedir: %s): %s',
repo['url'], repo['cachedir'], exc
)
new_rev = _rev(repo)
if any((x is None for x in (old_rev, new_rev))):
# There were problems getting the revision ID
continue
if new_rev != old_rev:
data['changed'] = True
clear_lock(repo)
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
if data.get('changed', False) is True or not os.path.isfile(env_cache):
env_cachedir = os.path.dirname(env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to %s', env_cache)
# if there is a change, fire an event
if __opts__.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
__opts__['sock_dir'],
__opts__['transport'],
opts=__opts__,
listen=False)
event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver'))
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'svnfs/hash'),
find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass | [
"def",
"update",
"(",
")",
":",
"# data for the fileserver event",
"data",
"=",
"{",
"'changed'",
":",
"False",
",",
"'backend'",
":",
"'svnfs'",
"}",
"# _clear_old_remotes runs init(), so use the value from there to avoid a",
"# second init()",
"data",
"[",
"'changed'",
... | Execute an svn update on all of the repos | [
"Execute",
"an",
"svn",
"update",
"on",
"all",
"of",
"the",
"repos"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L415-L488 | train |
saltstack/salt | salt/fileserver/svnfs.py | _env_is_exposed | def _env_is_exposed(env):
'''
Check if an environment is exposed by comparing it against a whitelist and
blacklist.
'''
if __opts__['svnfs_env_whitelist']:
salt.utils.versions.warn_until(
'Neon',
'The svnfs_env_whitelist config option has been renamed to '
'svnfs_saltenv_whitelist. Please update your configuration.'
)
whitelist = __opts__['svnfs_env_whitelist']
else:
whitelist = __opts__['svnfs_saltenv_whitelist']
if __opts__['svnfs_env_blacklist']:
salt.utils.versions.warn_until(
'Neon',
'The svnfs_env_blacklist config option has been renamed to '
'svnfs_saltenv_blacklist. Please update your configuration.'
)
blacklist = __opts__['svnfs_env_blacklist']
else:
blacklist = __opts__['svnfs_saltenv_blacklist']
return salt.utils.stringutils.check_whitelist_blacklist(
env,
whitelist=whitelist,
blacklist=blacklist,
) | python | def _env_is_exposed(env):
'''
Check if an environment is exposed by comparing it against a whitelist and
blacklist.
'''
if __opts__['svnfs_env_whitelist']:
salt.utils.versions.warn_until(
'Neon',
'The svnfs_env_whitelist config option has been renamed to '
'svnfs_saltenv_whitelist. Please update your configuration.'
)
whitelist = __opts__['svnfs_env_whitelist']
else:
whitelist = __opts__['svnfs_saltenv_whitelist']
if __opts__['svnfs_env_blacklist']:
salt.utils.versions.warn_until(
'Neon',
'The svnfs_env_blacklist config option has been renamed to '
'svnfs_saltenv_blacklist. Please update your configuration.'
)
blacklist = __opts__['svnfs_env_blacklist']
else:
blacklist = __opts__['svnfs_saltenv_blacklist']
return salt.utils.stringutils.check_whitelist_blacklist(
env,
whitelist=whitelist,
blacklist=blacklist,
) | [
"def",
"_env_is_exposed",
"(",
"env",
")",
":",
"if",
"__opts__",
"[",
"'svnfs_env_whitelist'",
"]",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Neon'",
",",
"'The svnfs_env_whitelist config option has been renamed to '",
"'svnfs_saltenv_whitel... | Check if an environment is exposed by comparing it against a whitelist and
blacklist. | [
"Check",
"if",
"an",
"environment",
"is",
"exposed",
"by",
"comparing",
"it",
"against",
"a",
"whitelist",
"and",
"blacklist",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L491-L520 | train |
saltstack/salt | salt/fileserver/svnfs.py | envs | def envs(ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
trunk = os.path.join(repo['repo'], repo['trunk'])
if os.path.isdir(trunk):
# Add base as the env for trunk
ret.add('base')
else:
log.error(
'svnfs trunk path \'%s\' does not exist in repo %s, no base '
'environment will be provided by this remote',
repo['trunk'], repo['url']
)
branches = os.path.join(repo['repo'], repo['branches'])
if os.path.isdir(branches):
ret.update(os.listdir(branches))
else:
log.error(
'svnfs branches path \'%s\' does not exist in repo %s',
repo['branches'], repo['url']
)
tags = os.path.join(repo['repo'], repo['tags'])
if os.path.isdir(tags):
ret.update(os.listdir(tags))
else:
log.error(
'svnfs tags path \'%s\' does not exist in repo %s',
repo['tags'], repo['url']
)
return [x for x in sorted(ret) if _env_is_exposed(x)] | python | def envs(ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
trunk = os.path.join(repo['repo'], repo['trunk'])
if os.path.isdir(trunk):
# Add base as the env for trunk
ret.add('base')
else:
log.error(
'svnfs trunk path \'%s\' does not exist in repo %s, no base '
'environment will be provided by this remote',
repo['trunk'], repo['url']
)
branches = os.path.join(repo['repo'], repo['branches'])
if os.path.isdir(branches):
ret.update(os.listdir(branches))
else:
log.error(
'svnfs branches path \'%s\' does not exist in repo %s',
repo['branches'], repo['url']
)
tags = os.path.join(repo['repo'], repo['tags'])
if os.path.isdir(tags):
ret.update(os.listdir(tags))
else:
log.error(
'svnfs tags path \'%s\' does not exist in repo %s',
repo['tags'], repo['url']
)
return [x for x in sorted(ret) if _env_is_exposed(x)] | [
"def",
"envs",
"(",
"ignore_cache",
"=",
"False",
")",
":",
"if",
"not",
"ignore_cache",
":",
"env_cache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'svnfs/envs.p'",
")",
"cache_match",
"=",
"salt",
".",
"fileser... | Return a list of refs that can be used as environments | [
"Return",
"a",
"list",
"of",
"refs",
"that",
"can",
"be",
"used",
"as",
"environments"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L523-L562 | train |
saltstack/salt | salt/fileserver/svnfs.py | _env_root | def _env_root(repo, saltenv):
'''
Return the root of the directory corresponding to the desired environment,
or None if the environment was not found.
'''
# If 'base' is desired, look for the trunk
if saltenv == 'base':
trunk = os.path.join(repo['repo'], repo['trunk'])
if os.path.isdir(trunk):
return trunk
else:
return None
# Check branches
branches = os.path.join(repo['repo'], repo['branches'])
if os.path.isdir(branches) and saltenv in os.listdir(branches):
return os.path.join(branches, saltenv)
# Check tags
tags = os.path.join(repo['repo'], repo['tags'])
if os.path.isdir(tags) and saltenv in os.listdir(tags):
return os.path.join(tags, saltenv)
return None | python | def _env_root(repo, saltenv):
'''
Return the root of the directory corresponding to the desired environment,
or None if the environment was not found.
'''
# If 'base' is desired, look for the trunk
if saltenv == 'base':
trunk = os.path.join(repo['repo'], repo['trunk'])
if os.path.isdir(trunk):
return trunk
else:
return None
# Check branches
branches = os.path.join(repo['repo'], repo['branches'])
if os.path.isdir(branches) and saltenv in os.listdir(branches):
return os.path.join(branches, saltenv)
# Check tags
tags = os.path.join(repo['repo'], repo['tags'])
if os.path.isdir(tags) and saltenv in os.listdir(tags):
return os.path.join(tags, saltenv)
return None | [
"def",
"_env_root",
"(",
"repo",
",",
"saltenv",
")",
":",
"# If 'base' is desired, look for the trunk",
"if",
"saltenv",
"==",
"'base'",
":",
"trunk",
"=",
"os",
".",
"path",
".",
"join",
"(",
"repo",
"[",
"'repo'",
"]",
",",
"repo",
"[",
"'trunk'",
"]",
... | Return the root of the directory corresponding to the desired environment,
or None if the environment was not found. | [
"Return",
"the",
"root",
"of",
"the",
"directory",
"corresponding",
"to",
"the",
"desired",
"environment",
"or",
"None",
"if",
"the",
"environment",
"was",
"not",
"found",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L565-L588 | train |
saltstack/salt | salt/fileserver/svnfs.py | find_file | def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref. This operates similarly to
the roots file sever but with assumptions of the directory structure
based on svn standard practices.
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
for repo in init():
env_root = _env_root(repo, tgt_env)
if env_root is None:
# Environment not found, try the next repo
continue
if repo['mountpoint'] \
and not path.startswith(repo['mountpoint'] + os.path.sep):
continue
repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
if repo['root']:
repo_path = os.path.join(repo['root'], repo_path)
full = os.path.join(env_root, repo_path)
if os.path.isfile(full):
fnd['rel'] = path
fnd['path'] = full
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
# 0 => st_mode=33188
# 1 => st_ino=10227377
# 2 => st_dev=65026
# 3 => st_nlink=1
# 4 => st_uid=1000
# 5 => st_gid=1000
# 6 => st_size=1056233
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
fnd['stat'] = list(os.stat(full))
except Exception:
pass
return fnd
return fnd | python | def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref. This operates similarly to
the roots file sever but with assumptions of the directory structure
based on svn standard practices.
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or tgt_env not in envs():
return fnd
for repo in init():
env_root = _env_root(repo, tgt_env)
if env_root is None:
# Environment not found, try the next repo
continue
if repo['mountpoint'] \
and not path.startswith(repo['mountpoint'] + os.path.sep):
continue
repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
if repo['root']:
repo_path = os.path.join(repo['root'], repo_path)
full = os.path.join(env_root, repo_path)
if os.path.isfile(full):
fnd['rel'] = path
fnd['path'] = full
try:
# Converting the stat result to a list, the elements of the
# list correspond to the following stat_result params:
# 0 => st_mode=33188
# 1 => st_ino=10227377
# 2 => st_dev=65026
# 3 => st_nlink=1
# 4 => st_uid=1000
# 5 => st_gid=1000
# 6 => st_size=1056233
# 7 => st_atime=1468284229
# 8 => st_mtime=1456338235
# 9 => st_ctime=1456338235
fnd['stat'] = list(os.stat(full))
except Exception:
pass
return fnd
return fnd | [
"def",
"find_file",
"(",
"path",
",",
"tgt_env",
"=",
"'base'",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0613",
"fnd",
"=",
"{",
"'path'",
":",
"''",
",",
"'rel'",
":",
"''",
"}",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")... | Find the first file to match the path and ref. This operates similarly to
the roots file sever but with assumptions of the directory structure
based on svn standard practices. | [
"Find",
"the",
"first",
"file",
"to",
"match",
"the",
"path",
"and",
"ref",
".",
"This",
"operates",
"similarly",
"to",
"the",
"roots",
"file",
"sever",
"but",
"with",
"assumptions",
"of",
"the",
"directory",
"structure",
"based",
"on",
"svn",
"standard",
... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L591-L635 | train |
saltstack/salt | salt/fileserver/svnfs.py | file_hash | def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not all(x in load for x in ('path', 'saltenv')):
return ''
saltenv = load['saltenv']
if saltenv == 'base':
saltenv = 'trunk'
ret = {}
relpath = fnd['rel']
path = fnd['path']
# If the file doesn't exist, we can't get a hash
if not path or not os.path.isfile(path):
return ret
# Set the hash_type as it is determined by config
ret['hash_type'] = __opts__['hash_type']
# Check if the hash is cached
# Cache file's contents should be "hash:mtime"
cache_path = os.path.join(__opts__['cachedir'],
'svnfs',
'hash',
saltenv,
'{0}.hash.{1}'.format(relpath,
__opts__['hash_type']))
# If we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
with salt.utils.files.fopen(cache_path, 'rb') as fp_:
hsum, mtime = fp_.read().split(':')
if os.path.getmtime(path) == mtime:
# check if mtime changed
ret['hsum'] = hsum
return ret
# if we don't have a cache entry-- lets make one
ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
with salt.utils.files.fopen(cache_path, 'w') as fp_:
fp_.write('{0}:{1}'.format(ret['hsum'], os.path.getmtime(path)))
return ret | python | def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not all(x in load for x in ('path', 'saltenv')):
return ''
saltenv = load['saltenv']
if saltenv == 'base':
saltenv = 'trunk'
ret = {}
relpath = fnd['rel']
path = fnd['path']
# If the file doesn't exist, we can't get a hash
if not path or not os.path.isfile(path):
return ret
# Set the hash_type as it is determined by config
ret['hash_type'] = __opts__['hash_type']
# Check if the hash is cached
# Cache file's contents should be "hash:mtime"
cache_path = os.path.join(__opts__['cachedir'],
'svnfs',
'hash',
saltenv,
'{0}.hash.{1}'.format(relpath,
__opts__['hash_type']))
# If we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
with salt.utils.files.fopen(cache_path, 'rb') as fp_:
hsum, mtime = fp_.read().split(':')
if os.path.getmtime(path) == mtime:
# check if mtime changed
ret['hsum'] = hsum
return ret
# if we don't have a cache entry-- lets make one
ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
with salt.utils.files.fopen(cache_path, 'w') as fp_:
fp_.write('{0}:{1}'.format(ret['hsum'], os.path.getmtime(path)))
return ret | [
"def",
"file_hash",
"(",
"load",
",",
"fnd",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"not",
"all",
"(",
"x",
"in",
"load",
"for",
"x",
"in",
"(",
"'path'",
... | Return a file hash, the hash type is set in the master config file | [
"Return",
"a",
"file",
"hash",
"the",
"hash",
"type",
"is",
"set",
"in",
"the",
"master",
"config",
"file"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L667-L718 | train |
saltstack/salt | salt/fileserver/svnfs.py | _file_lists | def _file_lists(load, form):
'''
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if 'saltenv' not in load or load['saltenv'] not in envs():
return []
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
log.critical('Unable to make cachedir %s', list_cachedir)
return []
list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
__opts__, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {
'files': set(),
'dirs': set(),
'empty_dirs': set()
}
for repo in init():
env_root = _env_root(repo, load['saltenv'])
if env_root is None:
# Environment not found, try the next repo
continue
if repo['root']:
env_root = \
os.path.join(env_root, repo['root']).rstrip(os.path.sep)
if not os.path.isdir(env_root):
# svnfs root (global or per-remote) does not exist in env
continue
for root, dirs, files in salt.utils.path.os_walk(env_root):
relpath = os.path.relpath(root, env_root)
dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
if relpath != '.':
ret['dirs'].add(dir_rel_fn)
if not dirs and not files:
ret['empty_dirs'].add(dir_rel_fn)
for fname in files:
rel_fn = os.path.relpath(
os.path.join(root, fname),
env_root
)
ret['files'].add(os.path.join(repo['mountpoint'], rel_fn))
if repo['mountpoint']:
ret['dirs'].add(repo['mountpoint'])
# Convert all compiled sets to lists
for key in ret:
ret[key] = sorted(ret[key])
if save_cache:
salt.fileserver.write_file_list_cache(
__opts__, ret, list_cache, w_lock
)
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return [] | python | def _file_lists(load, form):
'''
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if 'saltenv' not in load or load['saltenv'] not in envs():
return []
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/svnfs')
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
log.critical('Unable to make cachedir %s', list_cachedir)
return []
list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
__opts__, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {
'files': set(),
'dirs': set(),
'empty_dirs': set()
}
for repo in init():
env_root = _env_root(repo, load['saltenv'])
if env_root is None:
# Environment not found, try the next repo
continue
if repo['root']:
env_root = \
os.path.join(env_root, repo['root']).rstrip(os.path.sep)
if not os.path.isdir(env_root):
# svnfs root (global or per-remote) does not exist in env
continue
for root, dirs, files in salt.utils.path.os_walk(env_root):
relpath = os.path.relpath(root, env_root)
dir_rel_fn = os.path.join(repo['mountpoint'], relpath)
if relpath != '.':
ret['dirs'].add(dir_rel_fn)
if not dirs and not files:
ret['empty_dirs'].add(dir_rel_fn)
for fname in files:
rel_fn = os.path.relpath(
os.path.join(root, fname),
env_root
)
ret['files'].add(os.path.join(repo['mountpoint'], rel_fn))
if repo['mountpoint']:
ret['dirs'].add(repo['mountpoint'])
# Convert all compiled sets to lists
for key in ret:
ret[key] = sorted(ret[key])
if save_cache:
salt.fileserver.write_file_list_cache(
__opts__, ret, list_cache, w_lock
)
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return [] | [
"def",
"_file_lists",
"(",
"load",
",",
"form",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"'saltenv'",
"not",
"in",
"load",
"or",
"load",
"[",
"'saltenv'",
"]",
... | Return a dict containing the file lists for files, dirs, emptydirs and symlinks | [
"Return",
"a",
"dict",
"containing",
"the",
"file",
"lists",
"for",
"files",
"dirs",
"emptydirs",
"and",
"symlinks"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L721-L789 | train |
saltstack/salt | salt/auth/pam.py | authenticate | def authenticate(username, password):
'''
Returns True if the given username and password authenticate for the
given service. Returns False otherwise
``username``: the username to authenticate
``password``: the password in plain text
'''
service = __opts__.get('auth.pam.service', 'login')
if isinstance(username, six.text_type):
username = username.encode(__salt_system_encoding__)
if isinstance(password, six.text_type):
password = password.encode(__salt_system_encoding__)
if isinstance(service, six.text_type):
service = service.encode(__salt_system_encoding__)
@CONV_FUNC
def my_conv(n_messages, messages, p_response, app_data):
'''
Simple conversation function that responds to any
prompt where the echo is off with the supplied password
'''
# Create an array of n_messages response objects
addr = CALLOC(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = STRDUP(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = PAM_START(service, username, pointer(conv), pointer(handle))
if retval != 0:
# TODO: This is not an authentication error, something
# has gone wrong starting up PAM
PAM_END(handle, retval)
return False
retval = PAM_AUTHENTICATE(handle, 0)
if retval == 0:
PAM_ACCT_MGMT(handle, 0)
PAM_END(handle, 0)
return retval == 0 | python | def authenticate(username, password):
'''
Returns True if the given username and password authenticate for the
given service. Returns False otherwise
``username``: the username to authenticate
``password``: the password in plain text
'''
service = __opts__.get('auth.pam.service', 'login')
if isinstance(username, six.text_type):
username = username.encode(__salt_system_encoding__)
if isinstance(password, six.text_type):
password = password.encode(__salt_system_encoding__)
if isinstance(service, six.text_type):
service = service.encode(__salt_system_encoding__)
@CONV_FUNC
def my_conv(n_messages, messages, p_response, app_data):
'''
Simple conversation function that responds to any
prompt where the echo is off with the supplied password
'''
# Create an array of n_messages response objects
addr = CALLOC(n_messages, sizeof(PamResponse))
p_response[0] = cast(addr, POINTER(PamResponse))
for i in range(n_messages):
if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF:
pw_copy = STRDUP(password)
p_response.contents[i].resp = cast(pw_copy, c_char_p)
p_response.contents[i].resp_retcode = 0
return 0
handle = PamHandle()
conv = PamConv(my_conv, 0)
retval = PAM_START(service, username, pointer(conv), pointer(handle))
if retval != 0:
# TODO: This is not an authentication error, something
# has gone wrong starting up PAM
PAM_END(handle, retval)
return False
retval = PAM_AUTHENTICATE(handle, 0)
if retval == 0:
PAM_ACCT_MGMT(handle, 0)
PAM_END(handle, 0)
return retval == 0 | [
"def",
"authenticate",
"(",
"username",
",",
"password",
")",
":",
"service",
"=",
"__opts__",
".",
"get",
"(",
"'auth.pam.service'",
",",
"'login'",
")",
"if",
"isinstance",
"(",
"username",
",",
"six",
".",
"text_type",
")",
":",
"username",
"=",
"userna... | Returns True if the given username and password authenticate for the
given service. Returns False otherwise
``username``: the username to authenticate
``password``: the password in plain text | [
"Returns",
"True",
"if",
"the",
"given",
"username",
"and",
"password",
"authenticate",
"for",
"the",
"given",
"service",
".",
"Returns",
"False",
"otherwise"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/auth/pam.py#L168-L216 | train |
saltstack/salt | salt/grains/napalm.py | _retrieve_grains_cache | def _retrieve_grains_cache(proxy=None):
'''
Retrieves the grains from the network device if not cached already.
'''
global GRAINS_CACHE
if not GRAINS_CACHE:
if proxy and salt.utils.napalm.is_proxy(__opts__):
# if proxy var passed and is NAPALM-type proxy minion
GRAINS_CACHE = proxy['napalm.get_grains']()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
GRAINS_CACHE = salt.utils.napalm.call(
DEVICE_CACHE,
'get_facts',
**{}
)
return GRAINS_CACHE | python | def _retrieve_grains_cache(proxy=None):
'''
Retrieves the grains from the network device if not cached already.
'''
global GRAINS_CACHE
if not GRAINS_CACHE:
if proxy and salt.utils.napalm.is_proxy(__opts__):
# if proxy var passed and is NAPALM-type proxy minion
GRAINS_CACHE = proxy['napalm.get_grains']()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
GRAINS_CACHE = salt.utils.napalm.call(
DEVICE_CACHE,
'get_facts',
**{}
)
return GRAINS_CACHE | [
"def",
"_retrieve_grains_cache",
"(",
"proxy",
"=",
"None",
")",
":",
"global",
"GRAINS_CACHE",
"if",
"not",
"GRAINS_CACHE",
":",
"if",
"proxy",
"and",
"salt",
".",
"utils",
".",
"napalm",
".",
"is_proxy",
"(",
"__opts__",
")",
":",
"# if proxy var passed and ... | Retrieves the grains from the network device if not cached already. | [
"Retrieves",
"the",
"grains",
"from",
"the",
"network",
"device",
"if",
"not",
"cached",
"already",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L63-L79 | train |
saltstack/salt | salt/grains/napalm.py | _retrieve_device_cache | def _retrieve_device_cache(proxy=None):
'''
Loads the network device details if not cached already.
'''
global DEVICE_CACHE
if not DEVICE_CACHE:
if proxy and salt.utils.napalm.is_proxy(__opts__):
# if proxy var passed and is NAPALM-type proxy minion
if 'napalm.get_device' in proxy:
DEVICE_CACHE = proxy['napalm.get_device']()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
return DEVICE_CACHE | python | def _retrieve_device_cache(proxy=None):
'''
Loads the network device details if not cached already.
'''
global DEVICE_CACHE
if not DEVICE_CACHE:
if proxy and salt.utils.napalm.is_proxy(__opts__):
# if proxy var passed and is NAPALM-type proxy minion
if 'napalm.get_device' in proxy:
DEVICE_CACHE = proxy['napalm.get_device']()
elif not proxy and salt.utils.napalm.is_minion(__opts__):
# if proxy var not passed and is running in a straight minion
DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
return DEVICE_CACHE | [
"def",
"_retrieve_device_cache",
"(",
"proxy",
"=",
"None",
")",
":",
"global",
"DEVICE_CACHE",
"if",
"not",
"DEVICE_CACHE",
":",
"if",
"proxy",
"and",
"salt",
".",
"utils",
".",
"napalm",
".",
"is_proxy",
"(",
"__opts__",
")",
":",
"# if proxy var passed and ... | Loads the network device details if not cached already. | [
"Loads",
"the",
"network",
"device",
"details",
"if",
"not",
"cached",
"already",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L82-L95 | train |
saltstack/salt | salt/grains/napalm.py | _get_grain | def _get_grain(name, proxy=None):
'''
Retrieves the grain value from the cached dictionary.
'''
grains = _retrieve_grains_cache(proxy=proxy)
if grains.get('result', False) and grains.get('out', {}):
return grains.get('out').get(name) | python | def _get_grain(name, proxy=None):
'''
Retrieves the grain value from the cached dictionary.
'''
grains = _retrieve_grains_cache(proxy=proxy)
if grains.get('result', False) and grains.get('out', {}):
return grains.get('out').get(name) | [
"def",
"_get_grain",
"(",
"name",
",",
"proxy",
"=",
"None",
")",
":",
"grains",
"=",
"_retrieve_grains_cache",
"(",
"proxy",
"=",
"proxy",
")",
"if",
"grains",
".",
"get",
"(",
"'result'",
",",
"False",
")",
"and",
"grains",
".",
"get",
"(",
"'out'",
... | Retrieves the grain value from the cached dictionary. | [
"Retrieves",
"the",
"grain",
"value",
"from",
"the",
"cached",
"dictionary",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L98-L104 | train |
saltstack/salt | salt/grains/napalm.py | _get_device_grain | def _get_device_grain(name, proxy=None):
'''
Retrieves device-specific grains.
'''
device = _retrieve_device_cache(proxy=proxy)
return device.get(name.upper()) | python | def _get_device_grain(name, proxy=None):
'''
Retrieves device-specific grains.
'''
device = _retrieve_device_cache(proxy=proxy)
return device.get(name.upper()) | [
"def",
"_get_device_grain",
"(",
"name",
",",
"proxy",
"=",
"None",
")",
":",
"device",
"=",
"_retrieve_device_cache",
"(",
"proxy",
"=",
"proxy",
")",
"return",
"device",
".",
"get",
"(",
"name",
".",
"upper",
"(",
")",
")"
] | Retrieves device-specific grains. | [
"Retrieves",
"device",
"-",
"specific",
"grains",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L107-L112 | train |
saltstack/salt | salt/grains/napalm.py | username | def username(proxy=None):
'''
Return the username.
.. versionadded:: 2017.7.0
CLI Example - select all devices using `foobar` as username for connection:
.. code-block:: bash
salt -G 'username:foobar' test.ping
Output:
.. code-block::yaml
device1:
True
device2:
True
'''
if proxy and salt.utils.napalm.is_proxy(__opts__):
# only if proxy will override the username
# otherwise will use the default Salt grains
return {'username': _get_device_grain('username', proxy=proxy)} | python | def username(proxy=None):
'''
Return the username.
.. versionadded:: 2017.7.0
CLI Example - select all devices using `foobar` as username for connection:
.. code-block:: bash
salt -G 'username:foobar' test.ping
Output:
.. code-block::yaml
device1:
True
device2:
True
'''
if proxy and salt.utils.napalm.is_proxy(__opts__):
# only if proxy will override the username
# otherwise will use the default Salt grains
return {'username': _get_device_grain('username', proxy=proxy)} | [
"def",
"username",
"(",
"proxy",
"=",
"None",
")",
":",
"if",
"proxy",
"and",
"salt",
".",
"utils",
".",
"napalm",
".",
"is_proxy",
"(",
"__opts__",
")",
":",
"# only if proxy will override the username",
"# otherwise will use the default Salt grains",
"return",
"{"... | Return the username.
.. versionadded:: 2017.7.0
CLI Example - select all devices using `foobar` as username for connection:
.. code-block:: bash
salt -G 'username:foobar' test.ping
Output:
.. code-block::yaml
device1:
True
device2:
True | [
"Return",
"the",
"username",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L265-L289 | train |
saltstack/salt | salt/grains/napalm.py | host | def host(proxy=None):
'''
This grain is set by the NAPALM grain module
only when running in a proxy minion.
When Salt is installed directly on the network device,
thus running a regular minion, the ``host`` grain
provides the physical hostname of the network device,
as it would be on an ordinary minion server.
When running in a proxy minion, ``host`` points to the
value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`.
.. note::
The diference between ``host`` and ``hostname`` is that
``host`` provides the physical location - either domain name or IP address,
while ``hostname`` provides the hostname as configured on the device.
They are not necessarily the same.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt 'device*' grains.get host
Output:
.. code-block:: yaml
device1:
ip-172-31-13-136.us-east-2.compute.internal
device2:
ip-172-31-11-193.us-east-2.compute.internal
device3:
ip-172-31-2-181.us-east-2.compute.internal
'''
if proxy and salt.utils.napalm.is_proxy(__opts__):
# this grain is set only when running in a proxy minion
# otherwise will use the default Salt grains
return {'host': _get_device_grain('hostname', proxy=proxy)} | python | def host(proxy=None):
'''
This grain is set by the NAPALM grain module
only when running in a proxy minion.
When Salt is installed directly on the network device,
thus running a regular minion, the ``host`` grain
provides the physical hostname of the network device,
as it would be on an ordinary minion server.
When running in a proxy minion, ``host`` points to the
value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`.
.. note::
The diference between ``host`` and ``hostname`` is that
``host`` provides the physical location - either domain name or IP address,
while ``hostname`` provides the hostname as configured on the device.
They are not necessarily the same.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt 'device*' grains.get host
Output:
.. code-block:: yaml
device1:
ip-172-31-13-136.us-east-2.compute.internal
device2:
ip-172-31-11-193.us-east-2.compute.internal
device3:
ip-172-31-2-181.us-east-2.compute.internal
'''
if proxy and salt.utils.napalm.is_proxy(__opts__):
# this grain is set only when running in a proxy minion
# otherwise will use the default Salt grains
return {'host': _get_device_grain('hostname', proxy=proxy)} | [
"def",
"host",
"(",
"proxy",
"=",
"None",
")",
":",
"if",
"proxy",
"and",
"salt",
".",
"utils",
".",
"napalm",
".",
"is_proxy",
"(",
"__opts__",
")",
":",
"# this grain is set only when running in a proxy minion",
"# otherwise will use the default Salt grains",
"retur... | This grain is set by the NAPALM grain module
only when running in a proxy minion.
When Salt is installed directly on the network device,
thus running a regular minion, the ``host`` grain
provides the physical hostname of the network device,
as it would be on an ordinary minion server.
When running in a proxy minion, ``host`` points to the
value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`.
.. note::
The diference between ``host`` and ``hostname`` is that
``host`` provides the physical location - either domain name or IP address,
while ``hostname`` provides the hostname as configured on the device.
They are not necessarily the same.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt 'device*' grains.get host
Output:
.. code-block:: yaml
device1:
ip-172-31-13-136.us-east-2.compute.internal
device2:
ip-172-31-11-193.us-east-2.compute.internal
device3:
ip-172-31-2-181.us-east-2.compute.internal | [
"This",
"grain",
"is",
"set",
"by",
"the",
"NAPALM",
"grain",
"module",
"only",
"when",
"running",
"in",
"a",
"proxy",
"minion",
".",
"When",
"Salt",
"is",
"installed",
"directly",
"on",
"the",
"network",
"device",
"thus",
"running",
"a",
"regular",
"minio... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L316-L356 | train |
saltstack/salt | salt/grains/napalm.py | host_dns | def host_dns(proxy=None):
'''
Return the DNS information of the host.
This grain is a dictionary having two keys:
- ``A``
- ``AAAA``
.. note::
This grain is disabled by default, as the proxy startup may be slower
when the lookup fails.
The user can enable it using the ``napalm_host_dns_grain`` option (in
the pillar or proxy configuration file):
.. code-block:: yaml
napalm_host_dns_grain: true
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt 'device*' grains.get host_dns
Output:
.. code-block:: yaml
device1:
A:
- 172.31.9.153
AAAA:
- fd52:188c:c068::1
device2:
A:
- 172.31.46.249
AAAA:
- fdca:3b17:31ab::17
device3:
A:
- 172.31.8.167
AAAA:
- fd0f:9fd6:5fab::1
'''
if not __opts__.get('napalm_host_dns_grain', False):
return
device_host = host(proxy=proxy)
if device_host:
device_host_value = device_host['host']
host_dns_ret = {
'host_dns': {
'A': [],
'AAAA': []
}
}
dns_a = salt.utils.dns.lookup(device_host_value, 'A')
if dns_a:
host_dns_ret['host_dns']['A'] = dns_a
dns_aaaa = salt.utils.dns.lookup(device_host_value, 'AAAA')
if dns_aaaa:
host_dns_ret['host_dns']['AAAA'] = dns_aaaa
return host_dns_ret | python | def host_dns(proxy=None):
'''
Return the DNS information of the host.
This grain is a dictionary having two keys:
- ``A``
- ``AAAA``
.. note::
This grain is disabled by default, as the proxy startup may be slower
when the lookup fails.
The user can enable it using the ``napalm_host_dns_grain`` option (in
the pillar or proxy configuration file):
.. code-block:: yaml
napalm_host_dns_grain: true
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt 'device*' grains.get host_dns
Output:
.. code-block:: yaml
device1:
A:
- 172.31.9.153
AAAA:
- fd52:188c:c068::1
device2:
A:
- 172.31.46.249
AAAA:
- fdca:3b17:31ab::17
device3:
A:
- 172.31.8.167
AAAA:
- fd0f:9fd6:5fab::1
'''
if not __opts__.get('napalm_host_dns_grain', False):
return
device_host = host(proxy=proxy)
if device_host:
device_host_value = device_host['host']
host_dns_ret = {
'host_dns': {
'A': [],
'AAAA': []
}
}
dns_a = salt.utils.dns.lookup(device_host_value, 'A')
if dns_a:
host_dns_ret['host_dns']['A'] = dns_a
dns_aaaa = salt.utils.dns.lookup(device_host_value, 'AAAA')
if dns_aaaa:
host_dns_ret['host_dns']['AAAA'] = dns_aaaa
return host_dns_ret | [
"def",
"host_dns",
"(",
"proxy",
"=",
"None",
")",
":",
"if",
"not",
"__opts__",
".",
"get",
"(",
"'napalm_host_dns_grain'",
",",
"False",
")",
":",
"return",
"device_host",
"=",
"host",
"(",
"proxy",
"=",
"proxy",
")",
"if",
"device_host",
":",
"device_... | Return the DNS information of the host.
This grain is a dictionary having two keys:
- ``A``
- ``AAAA``
.. note::
This grain is disabled by default, as the proxy startup may be slower
when the lookup fails.
The user can enable it using the ``napalm_host_dns_grain`` option (in
the pillar or proxy configuration file):
.. code-block:: yaml
napalm_host_dns_grain: true
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt 'device*' grains.get host_dns
Output:
.. code-block:: yaml
device1:
A:
- 172.31.9.153
AAAA:
- fd52:188c:c068::1
device2:
A:
- 172.31.46.249
AAAA:
- fdca:3b17:31ab::17
device3:
A:
- 172.31.8.167
AAAA:
- fd0f:9fd6:5fab::1 | [
"Return",
"the",
"DNS",
"information",
"of",
"the",
"host",
".",
"This",
"grain",
"is",
"a",
"dictionary",
"having",
"two",
"keys",
":"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L359-L422 | train |
saltstack/salt | salt/grains/napalm.py | optional_args | def optional_args(proxy=None):
'''
Return the connection optional args.
.. note::
Sensible data will not be returned.
.. versionadded:: 2017.7.0
CLI Example - select all devices connecting via port 1234:
.. code-block:: bash
salt -G 'optional_args:port:1234' test.ping
Output:
.. code-block:: yaml
device1:
True
device2:
True
'''
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
return {'optional_args': opt_args} | python | def optional_args(proxy=None):
'''
Return the connection optional args.
.. note::
Sensible data will not be returned.
.. versionadded:: 2017.7.0
CLI Example - select all devices connecting via port 1234:
.. code-block:: bash
salt -G 'optional_args:port:1234' test.ping
Output:
.. code-block:: yaml
device1:
True
device2:
True
'''
opt_args = _get_device_grain('optional_args', proxy=proxy) or {}
if opt_args and _FORBIDDEN_OPT_ARGS:
for arg in _FORBIDDEN_OPT_ARGS:
opt_args.pop(arg, None)
return {'optional_args': opt_args} | [
"def",
"optional_args",
"(",
"proxy",
"=",
"None",
")",
":",
"opt_args",
"=",
"_get_device_grain",
"(",
"'optional_args'",
",",
"proxy",
"=",
"proxy",
")",
"or",
"{",
"}",
"if",
"opt_args",
"and",
"_FORBIDDEN_OPT_ARGS",
":",
"for",
"arg",
"in",
"_FORBIDDEN_O... | Return the connection optional args.
.. note::
Sensible data will not be returned.
.. versionadded:: 2017.7.0
CLI Example - select all devices connecting via port 1234:
.. code-block:: bash
salt -G 'optional_args:port:1234' test.ping
Output:
.. code-block:: yaml
device1:
True
device2:
True | [
"Return",
"the",
"connection",
"optional",
"args",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L425-L454 | train |
saltstack/salt | salt/states/pkgbuild.py | _get_missing_results | def _get_missing_results(results, dest_dir):
'''
Return a list of the filenames specified in the ``results`` argument, which
are not present in the dest_dir.
'''
try:
present = set(os.listdir(dest_dir))
except OSError as exc:
if exc.errno == errno.ENOENT:
log.debug('pkgbuild.built: dest_dir \'%s\' does not exist', dest_dir)
elif exc.errno == errno.EACCES:
log.error('pkgbuilt.built: cannot access dest_dir \'%s\'', dest_dir)
present = set()
return sorted(set(results).difference(present)) | python | def _get_missing_results(results, dest_dir):
'''
Return a list of the filenames specified in the ``results`` argument, which
are not present in the dest_dir.
'''
try:
present = set(os.listdir(dest_dir))
except OSError as exc:
if exc.errno == errno.ENOENT:
log.debug('pkgbuild.built: dest_dir \'%s\' does not exist', dest_dir)
elif exc.errno == errno.EACCES:
log.error('pkgbuilt.built: cannot access dest_dir \'%s\'', dest_dir)
present = set()
return sorted(set(results).difference(present)) | [
"def",
"_get_missing_results",
"(",
"results",
",",
"dest_dir",
")",
":",
"try",
":",
"present",
"=",
"set",
"(",
"os",
".",
"listdir",
"(",
"dest_dir",
")",
")",
"except",
"OSError",
"as",
"exc",
":",
"if",
"exc",
".",
"errno",
"==",
"errno",
".",
"... | Return a list of the filenames specified in the ``results`` argument, which
are not present in the dest_dir. | [
"Return",
"a",
"list",
"of",
"the",
"filenames",
"specified",
"in",
"the",
"results",
"argument",
"which",
"are",
"not",
"present",
"in",
"the",
"dest_dir",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkgbuild.py#L57-L70 | train |
saltstack/salt | salt/states/pkgbuild.py | built | def built(name,
runas,
dest_dir,
spec,
sources,
tgt,
template=None,
deps=None,
env=None,
results=None,
force=False,
saltenv='base',
log_dir='/var/log/salt/pkgbuild'):
'''
Ensure that the named package is built and exists in the named directory
name
The name to track the build, the name value is otherwise unused
runas
The user to run the build process as
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
tgt
The target platform to run the build on
template
Run the spec file through a templating engine
.. versionchanged:: 2015.8.2
This argument is now optional, allowing for no templating engine to
be used if none is desired.
deps
Packages required to ensure that the named package is built
can be hosted on either the salt master server or on an HTTP
or FTP server. Both HTTPS and HTTP are supported as well as
downloading directly from Amazon S3 compatible URLs with both
pre-configured and automatic IAM credentials
env
A dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
results
The names of the expected rpms that will be built
force : False
If ``True``, packages will be built even if they already exist in the
``dest_dir``. This is useful when building a package for continuous or
nightly package builds.
.. versionadded:: 2015.8.2
saltenv
The saltenv to use for files downloaded from the salt filesever
log_dir : /var/log/salt/rpmbuild
Root directory for log files created from the build. Logs will be
organized by package name, version, OS release, and CPU architecture
under this directory.
.. versionadded:: 2015.8.2
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if not results:
ret['comment'] = '\'results\' argument is required'
ret['result'] = False
return ret
if isinstance(results, six.string_types):
results = results.split(',')
needed = _get_missing_results(results, dest_dir)
if not force and not needed:
ret['comment'] = 'All needed packages exist'
return ret
if __opts__['test']:
ret['result'] = None
if force:
ret['comment'] = 'Packages will be force-built'
else:
ret['comment'] = 'The following packages need to be built: '
ret['comment'] += ', '.join(needed)
return ret
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, dict):
ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
'documentation.')
ret['result'] = False
return ret
func = 'pkgbuild.build'
if __grains__.get('os_family', False) not in ('RedHat', 'Suse'):
for res in results:
if res.endswith('.rpm'):
func = 'rpmbuild.build'
break
ret['changes'] = __salt__[func](
runas,
tgt,
dest_dir,
spec,
sources,
deps,
env,
template,
saltenv,
log_dir)
needed = _get_missing_results(results, dest_dir)
if needed:
ret['comment'] = 'The following packages were not built: '
ret['comment'] += ', '.join(needed)
ret['result'] = False
else:
ret['comment'] = 'All needed packages were built'
return ret | python | def built(name,
runas,
dest_dir,
spec,
sources,
tgt,
template=None,
deps=None,
env=None,
results=None,
force=False,
saltenv='base',
log_dir='/var/log/salt/pkgbuild'):
'''
Ensure that the named package is built and exists in the named directory
name
The name to track the build, the name value is otherwise unused
runas
The user to run the build process as
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
tgt
The target platform to run the build on
template
Run the spec file through a templating engine
.. versionchanged:: 2015.8.2
This argument is now optional, allowing for no templating engine to
be used if none is desired.
deps
Packages required to ensure that the named package is built
can be hosted on either the salt master server or on an HTTP
or FTP server. Both HTTPS and HTTP are supported as well as
downloading directly from Amazon S3 compatible URLs with both
pre-configured and automatic IAM credentials
env
A dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
results
The names of the expected rpms that will be built
force : False
If ``True``, packages will be built even if they already exist in the
``dest_dir``. This is useful when building a package for continuous or
nightly package builds.
.. versionadded:: 2015.8.2
saltenv
The saltenv to use for files downloaded from the salt filesever
log_dir : /var/log/salt/rpmbuild
Root directory for log files created from the build. Logs will be
organized by package name, version, OS release, and CPU architecture
under this directory.
.. versionadded:: 2015.8.2
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if not results:
ret['comment'] = '\'results\' argument is required'
ret['result'] = False
return ret
if isinstance(results, six.string_types):
results = results.split(',')
needed = _get_missing_results(results, dest_dir)
if not force and not needed:
ret['comment'] = 'All needed packages exist'
return ret
if __opts__['test']:
ret['result'] = None
if force:
ret['comment'] = 'Packages will be force-built'
else:
ret['comment'] = 'The following packages need to be built: '
ret['comment'] += ', '.join(needed)
return ret
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, dict):
ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
'documentation.')
ret['result'] = False
return ret
func = 'pkgbuild.build'
if __grains__.get('os_family', False) not in ('RedHat', 'Suse'):
for res in results:
if res.endswith('.rpm'):
func = 'rpmbuild.build'
break
ret['changes'] = __salt__[func](
runas,
tgt,
dest_dir,
spec,
sources,
deps,
env,
template,
saltenv,
log_dir)
needed = _get_missing_results(results, dest_dir)
if needed:
ret['comment'] = 'The following packages were not built: '
ret['comment'] += ', '.join(needed)
ret['result'] = False
else:
ret['comment'] = 'All needed packages were built'
return ret | [
"def",
"built",
"(",
"name",
",",
"runas",
",",
"dest_dir",
",",
"spec",
",",
"sources",
",",
"tgt",
",",
"template",
"=",
"None",
",",
"deps",
"=",
"None",
",",
"env",
"=",
"None",
",",
"results",
"=",
"None",
",",
"force",
"=",
"False",
",",
"s... | Ensure that the named package is built and exists in the named directory
name
The name to track the build, the name value is otherwise unused
runas
The user to run the build process as
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
tgt
The target platform to run the build on
template
Run the spec file through a templating engine
.. versionchanged:: 2015.8.2
This argument is now optional, allowing for no templating engine to
be used if none is desired.
deps
Packages required to ensure that the named package is built
can be hosted on either the salt master server or on an HTTP
or FTP server. Both HTTPS and HTTP are supported as well as
downloading directly from Amazon S3 compatible URLs with both
pre-configured and automatic IAM credentials
env
A dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
results
The names of the expected rpms that will be built
force : False
If ``True``, packages will be built even if they already exist in the
``dest_dir``. This is useful when building a package for continuous or
nightly package builds.
.. versionadded:: 2015.8.2
saltenv
The saltenv to use for files downloaded from the salt filesever
log_dir : /var/log/salt/rpmbuild
Root directory for log files created from the build. Logs will be
organized by package name, version, OS release, and CPU architecture
under this directory.
.. versionadded:: 2015.8.2 | [
"Ensure",
"that",
"the",
"named",
"package",
"is",
"built",
"and",
"exists",
"in",
"the",
"named",
"directory"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkgbuild.py#L73-L220 | train |
saltstack/salt | salt/states/pkgbuild.py | repo | def repo(name,
keyid=None,
env=None,
use_passphrase=False,
gnupghome='/etc/salt/gpgkeys',
runas='builder',
timeout=15.0):
'''
Make a package repository and optionally sign it and packages present
The name is directory to turn into a repo. This state is best used
with onchanges linked to your package building states.
name
The directory to find packages that will be in the repository
keyid
.. versionchanged:: 2016.3.0
Optional Key ID to use in signing packages and repository.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data.
For example, contents from a Pillar data file with named Public
and Private keys as follows:
.. code-block:: yaml
gpg_pkg_priv_key: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1
lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
=JvW8
-----END PGP PRIVATE KEY BLOCK-----
gpg_pkg_priv_keyname: gpg_pkg_key.pem
gpg_pkg_pub_key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
inNqW9c=
=s1CX
-----END PGP PUBLIC KEY BLOCK-----
gpg_pkg_pub_keyname: gpg_pkg_key.pub
env
.. versionchanged:: 2016.3.0
A dictionary of environment variables to be utilized in creating the
repository. Example:
.. code-block:: yaml
- env:
OPTIONS: 'ask-passphrase'
.. warning::
The above illustrates a common ``PyYAML`` pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other)
``PyYAML`` idiosyncrasies can be found :ref:`here
<yaml-idiosyncrasies>`.
Use of ``OPTIONS`` on some platforms, for example:
``ask-passphrase``, will require ``gpg-agent`` or similar to cache
passphrases.
.. note::
This parameter is not used for making ``yum`` repositories.
use_passphrase : False
.. versionadded:: 2016.3.0
Use a passphrase with the signing key presented in ``keyid``.
Passphrase is received from Pillar data which could be passed on the
command line with ``pillar`` parameter. For example:
.. code-block:: bash
pillar='{ "gpg_passphrase" : "my_passphrase" }'
gnupghome : /etc/salt/gpgkeys
.. versionadded:: 2016.3.0
Location where GPG related files are stored, used with 'keyid'
runas : builder
.. versionadded:: 2016.3.0
User to create the repository as, and optionally sign packages.
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
timeout : 15.0
.. versionadded:: 2016.3.4
Timeout in seconds to wait for the prompt for inputting the passphrase.
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Package repo metadata at {0} will be refreshed'.format(name)
return ret
# Need the check for None here, if env is not provided then it falls back
# to None and it is assumed that the environment is not being overridden.
if env is not None and not isinstance(env, dict):
ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
'documentation.')
return ret
func = 'pkgbuild.make_repo'
if __grains__.get('os_family', False) not in ('RedHat', 'Suse'):
for file in os.listdir(name):
if file.endswith('.rpm'):
func = 'rpmbuild.make_repo'
break
res = __salt__[func](name, keyid, env, use_passphrase, gnupghome, runas, timeout)
if res['retcode'] > 0:
ret['result'] = False
else:
ret['changes'] = {'refresh': True}
if res['stdout'] and res['stderr']:
ret['comment'] = "{0}\n{1}".format(res['stdout'], res['stderr'])
elif res['stdout']:
ret['comment'] = res['stdout']
elif res['stderr']:
ret['comment'] = res['stderr']
def repo(name,
         keyid=None,
         env=None,
         use_passphrase=False,
         gnupghome='/etc/salt/gpgkeys',
         runas='builder',
         timeout=15.0):
    '''
    Make a package repository and optionally sign it and packages present

    The name is directory to turn into a repo. This state is best used
    with onchanges linked to your package building states.

    name
        The directory to find packages that will be in the repository

    keyid
        Optional key ID to use in signing packages and repository.
        Utilizes public and private keys associated with ``keyid`` which
        have been loaded into the minion's Pillar data
        (``gpg_pkg_priv_key``/``gpg_pkg_pub_key`` and their
        ``*_keyname`` entries).

    env
        A dictionary of environment variables to be utilized in creating
        the repository. Not used for making ``yum`` repositories.

    use_passphrase : False
        Use a passphrase with the signing key presented in ``keyid``.
        The passphrase is received from Pillar data (``gpg_passphrase``),
        which may be passed on the command line with the ``pillar``
        parameter.

    gnupghome : /etc/salt/gpgkeys
        Location where GPG related files are stored, used with ``keyid``.

    runas : builder
        User to create the repository as, and optionally sign packages.
        Ensure the user has correct permissions to any files and
        directories which are to be utilized.

    timeout : 15.0
        Timeout in seconds to wait for the prompt for inputting the
        passphrase.
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}

    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'Package repo metadata at {0} will be refreshed'.format(name)
        return ret

    # Need the check for None here, if env is not provided then it falls back
    # to None and it is assumed that the environment is not being overridden.
    if env is not None and not isinstance(env, dict):
        ret['comment'] = ('Invalidly-formatted \'env\' parameter. See '
                          'documentation.')
        return ret

    # Non-RedHat/Suse platforms normally use the generic pkgbuild module,
    # but if the directory actually contains RPMs, dispatch to rpmbuild.
    # was: "for file in os.listdir(...)" with a manual break — renamed to
    # avoid shadowing the (Python 2) builtin ``file`` and replaced by any().
    func = 'pkgbuild.make_repo'
    if __grains__.get('os_family', False) not in ('RedHat', 'Suse'):
        if any(fname.endswith('.rpm') for fname in os.listdir(name)):
            func = 'rpmbuild.make_repo'

    res = __salt__[func](name, keyid, env, use_passphrase, gnupghome, runas, timeout)

    if res['retcode'] > 0:
        ret['result'] = False
    else:
        ret['changes'] = {'refresh': True}

    # Surface whatever the repo-building command printed as the comment.
    if res['stdout'] and res['stderr']:
        ret['comment'] = "{0}\n{1}".format(res['stdout'], res['stderr'])
    elif res['stdout']:
        ret['comment'] = res['stdout']
    elif res['stderr']:
        ret['comment'] = res['stderr']

    return ret
def compile_template(template,
                     renderers,
                     default,
                     blacklist,
                     whitelist,
                     saltenv='base',
                     sls='',
                     input_data='',
                     **kwargs):
    '''
    Take the path to a template and return the high data structure
    derived from the template.

    template
        Path to a template file, or the literal ``:string:`` to render
        ``input_data`` directly instead of reading a file.
    renderers
        Mapping of renderer name -> render function.
    default
        Render pipe string used when the template carries no shebang.
    blacklist / whitelist
        Renderer names to disallow / exclusively allow.
    saltenv
        Salt fileserver environment (default ``base``).
    sls
        Name of the SLS being rendered (used for error reporting).
    input_data
        Template text; only consulted when ``template`` is ``:string:``.

    Helpers:

    :param mask_value:
        Mask value for debugging purposes (prevent sensitive information etc)
        example: "mask_value="pass*". All "passwd", "password", "pass" will
        be masked (as text).
    '''
    # if any error occurs, we return an empty dictionary
    ret = {}

    log.debug('compile template: %s', template)

    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')

    if template != ':string:':
        # Template was specified incorrectly
        if not isinstance(template, six.string_types):
            log.error('Template was specified incorrectly: %s', template)
            return ret
        # Template does not exist
        if not os.path.isfile(template):
            log.error('Template does not exist: %s', template)
            return ret
        # Template is an empty file
        if salt.utils.files.is_empty(template):
            log.debug('Template is an empty file: %s', template)
            return ret

        with codecs.open(template, encoding=SLS_ENCODING) as ifile:
            # data input to the first render function in the pipe
            input_data = ifile.read()
            if not input_data.strip():
                # Template is nothing but whitespace
                # NOTE(review): this emptiness check only runs for on-disk
                # templates; ':string:' input is passed through unchecked.
                log.error('Template is nothing but whitespace: %s', template)
                return ret

    # Get the list of render funcs in the render pipe line.
    render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)

    # Remember whether the original template used CRLF line endings so they
    # can be restored after rendering (renderers normalize to LF).
    windows_newline = '\r\n' in input_data

    input_data = StringIO(input_data)
    for render, argline in render_pipe:
        # Each renderer consumes the previous renderer's output; rewind the
        # stream before handing it to the next stage.
        if salt.utils.stringio.is_readable(input_data):
            input_data.seek(0)  # pylint: disable=no-member
        render_kwargs = dict(renderers=renderers, tmplpath=template)
        render_kwargs.update(kwargs)
        if argline:
            render_kwargs['argline'] = argline
        start = time.time()
        ret = render(input_data, saltenv, sls, **render_kwargs)
        log.profile(
            'Time (in seconds) to render \'%s\' using \'%s\' renderer: %s',
            template,
            render.__module__.split('.')[-1],
            time.time() - start
        )
        if ret is None:
            # The file is empty or is being written elsewhere; retry once
            # after a short pause.
            time.sleep(0.01)
            ret = render(input_data, saltenv, sls, **render_kwargs)
        input_data = ret
        if log.isEnabledFor(logging.GARBAGE):  # pylint: disable=no-member
            # If ret is not a StringIO (which means it was rendered using
            # yaml, mako, or another engine which renders to a data
            # structure) we don't want to log this.
            if salt.utils.stringio.is_readable(ret):
                log.debug('Rendered data from file: %s:\n%s', template,
                          salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),
                                                                kwargs.get('mask_value')))  # pylint: disable=no-member
                ret.seek(0)  # pylint: disable=no-member

    # Preserve newlines from original template
    if windows_newline:
        if salt.utils.stringio.is_readable(ret):
            is_stringio = True
            contents = ret.read()
        else:
            is_stringio = False
            contents = ret

        if isinstance(contents, six.string_types):
            if '\r\n' not in contents:
                contents = contents.replace('\n', '\r\n')
                ret = StringIO(contents) if is_stringio else contents
            else:
                # Contents already carry CRLF; just rewind the stream.
                if is_stringio:
                    ret.seek(0)

    return ret
def compile_template_str(template, renderers, default, blacklist, whitelist):
    '''
    Take template as a string and return the high data structure
    derived from the template.

    The string is written to a temporary file (encoded with the SLS
    encoding) and then handed to :func:`compile_template`.

    NOTE(review): the temporary file is not removed afterwards —
    presumably useful for debugging failed renders; confirm before
    changing.
    '''
    tmp_path = salt.utils.files.mkstemp()
    encoded_sls = SLS_ENCODER(template)[0]
    with salt.utils.files.fopen(tmp_path, 'wb') as tmp_file:
        tmp_file.write(encoded_sls)
    return compile_template(tmp_path, renderers, default, blacklist, whitelist)
def template_shebang(template, renderers, default, blacklist, whitelist, input_data):
    '''
    Check the template shebang line and return the list of renderers specified
    in the pipe.

    template
        Path to the template file, or ``:string:`` to inspect
        ``input_data`` instead.
    renderers / default / blacklist / whitelist
        Passed through to :func:`check_render_pipe_str`.
    input_data
        Template text, consulted only when ``template`` is ``:string:``.

    Example shebang lines::

        #!yaml_jinja
        #!yaml_mako
        #!mako|yaml
        #!jinja|yaml
        #!jinja|mako|yaml
        #!mako|yaml|stateconf
        #!jinja|yaml|stateconf
        #!mako|yaml_odict
        #!mako|yaml_odict|stateconf
    '''
    line = ''
    # Open up the first line of the sls template
    if template == ':string:':
        # Guard against empty/whitespace-only string input: the previous
        # input_data.split()[0] raised IndexError here (compile_template only
        # rejects empty input for on-disk templates). Fall back to the
        # default render pipe instead.
        words = input_data.split()
        line = words[0] if words else ''
    else:
        with salt.utils.files.fopen(template, 'r') as ifile:
            line = salt.utils.stringutils.to_unicode(ifile.readline())

    # Check if it starts with a shebang and not a path
    if line.startswith('#!') and not line.startswith('#!/'):
        # pull out the shebang data
        # If the shebang does not contain recognized/not-blacklisted/whitelisted
        # renderers, do not fall back to the default renderer
        return check_render_pipe_str(line.strip()[2:], renderers, blacklist, whitelist)
    else:
        return check_render_pipe_str(default, renderers, blacklist, whitelist)
def check_render_pipe_str(pipestr, renderers, blacklist, whitelist):
    '''
    Check that all renderers specified in the pipe string are available.
    If so, return the list of render functions in the pipe as
    (render_func, arg_str) tuples; otherwise return [].
    '''
    if pipestr is None:
        return []

    # Note: currently, | is not allowed anywhere in the shebang line except
    # as pipes between renderers.
    segments = [seg.strip() for seg in pipestr.split('|')]
    if segments[0] == pipestr and pipestr in OLD_STYLE_RENDERERS:
        # Legacy single-word names (e.g. 'yaml_jinja') expand to a pipe.
        segments = OLD_STYLE_RENDERERS[pipestr].split('|')

    pipeline = []
    for segment in segments:
        name, _, argline = (segment + ' ').partition(' ')
        disallowed = (whitelist and name not in whitelist) or \
                     (blacklist and name in blacklist)
        if disallowed:
            log.warning(
                'The renderer "%s" is disallowed by configuration and '
                'will be skipped.', name
            )
            continue
        try:
            render_func = renderers[name]
        except KeyError:
            # Any unknown renderer invalidates the whole pipe.
            log.error('The renderer "%s" is not available', pipestr)
            return []
        pipeline.append((render_func, argline.strip()))
    return pipeline
def init(state):
    '''
    Change the system runlevel on sysV compatible systems

    state : string
        Target init state. Common values: ``0`` (stop the OS), ``1``
        (administrative state), ``s``/``S`` (single-user state), ``5``
        (power down), ``6`` (reboot to the initdefault state).

    CLI Example:

    .. code-block:: bash

        salt '*' system.init 3
    '''
    # shutdown(1M): -i selects the init state, -g 0 gives no grace period,
    # -y answers the confirmation prompt automatically.
    shutdown_cmd = ['shutdown', '-i', state, '-g', '0', '-y']
    return __salt__['cmd.run'](shutdown_cmd, python_shell=False)
def reboot(delay=0, message=None):
    '''
    Reboot the system

    delay : int
        Optional wait time in seconds before the system will be rebooted.

    message : string
        Optional message to broadcast before rebooting.

    CLI Example:

    .. code-block:: bash

        salt '*' system.reboot
        salt '*' system.reboot 60 "=== system upgraded ==="
    '''
    # shutdown(1M): -i 6 requests a reboot, -g <delay> is the grace period,
    # -y skips the interactive confirmation.
    # str(delay): the documented default is the integer 0, but every element
    # of a subprocess argv list must be a string; str() is a no-op when the
    # caller already passed one.
    cmd = ['shutdown', '-i', '6', '-g', str(delay), '-y']
    if message:
        cmd.append(message)
    ret = __salt__['cmd.run'](cmd, python_shell=False)
    return ret
def _get_twilio(profile):
    '''
    Build and return a Twilio REST client from the account SID and auth
    token stored under *profile* in the minion configuration.
    '''
    creds = __salt__['config.option'](profile)
    account_sid = creds.get('twilio.account_sid')
    auth_token = creds.get('twilio.auth_token')
    return TwilioRestClient(account_sid, auth_token)
def send_sms(profile, body, to, from_):
    '''
    Send an sms

    profile
        Config profile name holding the Twilio credentials.
    body
        Message text.
    to / from_
        Destination and source phone numbers.

    Returns a dict with a ``message`` sub-dict describing the sent message;
    on failure ``message['sid']`` stays ``None`` and an ``_error`` sub-dict
    carries the Twilio error code/msg/status.

    CLI Example:

        twilio.send_sms twilio-account 'Test sms' '+18019999999' '+18011111111'
    '''
    # sid stays None on the error path so callers can detect failure.
    # (The original re-initialized ret['message'] a second time after the
    # try block; that redundant reassignment is removed.)
    ret = {'message': {'sid': None}}
    client = _get_twilio(profile)
    try:
        if TWILIO_5:
            # twilio<6 exposes messages under the .sms namespace
            message = client.sms.messages.create(body=body, to=to, from_=from_)
        else:
            message = client.messages.create(body=body, to=to, from_=from_)
    except TwilioRestException as exc:
        ret['_error'] = {
            'code': exc.code,
            'msg': exc.msg,
            'status': exc.status,
        }
        log.debug('Could not send sms. Error: %s', ret)
        return ret
    ret['message'] = {
        'sid': message.sid,
        'price': message.price,
        'price_unit': message.price_unit,
        'status': message.status,
        'num_segments': message.num_segments,
        'body': message.body,
        'date_sent': six.text_type(message.date_sent),
        'date_created': six.text_type(message.date_created),
    }
    log.info(ret)
    return ret
saltstack/salt | salt/client/ssh/state.py | lowstate_file_refs | def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
refs = {}
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
crefs = []
for state in chunk:
if state == '__env__':
saltenv = chunk[state]
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if saltenv not in refs:
refs[saltenv] = []
if crefs:
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
refs[env].append([x])
return refs | python | def lowstate_file_refs(chunks, extras=''):
'''
Create a list of file ref objects to reconcile
'''
refs = {}
for chunk in chunks:
if not isinstance(chunk, dict):
continue
saltenv = 'base'
crefs = []
for state in chunk:
if state == '__env__':
saltenv = chunk[state]
elif state.startswith('__'):
continue
crefs.extend(salt_refs(chunk[state]))
if saltenv not in refs:
refs[saltenv] = []
if crefs:
refs[saltenv].append(crefs)
if extras:
extra_refs = extras.split(',')
if extra_refs:
for env in refs:
for x in extra_refs:
refs[env].append([x])
return refs | [
"def",
"lowstate_file_refs",
"(",
"chunks",
",",
"extras",
"=",
"''",
")",
":",
"refs",
"=",
"{",
"}",
"for",
"chunk",
"in",
"chunks",
":",
"if",
"not",
"isinstance",
"(",
"chunk",
",",
"dict",
")",
":",
"continue",
"saltenv",
"=",
"'base'",
"crefs",
... | Create a list of file ref objects to reconcile | [
"Create",
"a",
"list",
"of",
"file",
"ref",
"objects",
"to",
"reconcile"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L122-L148 | train |
saltstack/salt | salt/client/ssh/state.py | salt_refs | def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, six.string_types):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
for comp in data:
salt_refs(comp, ret)
if isinstance(data, dict):
for comp in data:
salt_refs(data[comp], ret)
return ret | python | def salt_refs(data, ret=None):
'''
Pull salt file references out of the states
'''
proto = 'salt://'
if ret is None:
ret = []
if isinstance(data, six.string_types):
if data.startswith(proto) and data not in ret:
ret.append(data)
if isinstance(data, list):
for comp in data:
salt_refs(comp, ret)
if isinstance(data, dict):
for comp in data:
salt_refs(data[comp], ret)
return ret | [
"def",
"salt_refs",
"(",
"data",
",",
"ret",
"=",
"None",
")",
":",
"proto",
"=",
"'salt://'",
"if",
"ret",
"is",
"None",
":",
"ret",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"data",
",",
"six",
".",
"string_types",
")",
":",
"if",
"data",
".",
"s... | Pull salt file references out of the states | [
"Pull",
"salt",
"file",
"references",
"out",
"of",
"the",
"states"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L151-L167 | train |
saltstack/salt | salt/client/ssh/state.py | prep_trans_tar | def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None, roster_grains=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
roster_grainsfn = os.path.join(gendir, 'roster_grains.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.files.fopen(lowfn, 'w+') as fp_:
salt.utils.json.dump(chunks, fp_)
if pillar:
with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
salt.utils.json.dump(pillar, fp_)
if roster_grains:
with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
salt.utils.json.dump(roster_grains, fp_)
if id_ is None:
id_ = ''
try:
cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
except AttributeError:
# Minion ID should always be a str, but don't let an int break this
cachedir = os.path.join('salt-ssh', six.text_type(id_)).rstrip(os.sep)
for saltenv in file_refs:
# Location where files in this saltenv will be cached
cache_dest_root = os.path.join(cachedir, 'files', saltenv)
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0].lstrip('/')
cache_dest = os.path.join(cache_dest_root, short)
try:
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
except IOError:
path = ''
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
try:
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
except IOError:
files = ''
if files:
for filename in files:
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try:
# cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in salt.utils.path.os_walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar | python | def prep_trans_tar(file_client, chunks, file_refs, pillar=None, id_=None, roster_grains=None):
'''
Generate the execution package from the saltenv file refs and a low state
data structure
'''
gendir = tempfile.mkdtemp()
trans_tar = salt.utils.files.mkstemp()
lowfn = os.path.join(gendir, 'lowstate.json')
pillarfn = os.path.join(gendir, 'pillar.json')
roster_grainsfn = os.path.join(gendir, 'roster_grains.json')
sync_refs = [
[salt.utils.url.create('_modules')],
[salt.utils.url.create('_states')],
[salt.utils.url.create('_grains')],
[salt.utils.url.create('_renderers')],
[salt.utils.url.create('_returners')],
[salt.utils.url.create('_output')],
[salt.utils.url.create('_utils')],
]
with salt.utils.files.fopen(lowfn, 'w+') as fp_:
salt.utils.json.dump(chunks, fp_)
if pillar:
with salt.utils.files.fopen(pillarfn, 'w+') as fp_:
salt.utils.json.dump(pillar, fp_)
if roster_grains:
with salt.utils.files.fopen(roster_grainsfn, 'w+') as fp_:
salt.utils.json.dump(roster_grains, fp_)
if id_ is None:
id_ = ''
try:
cachedir = os.path.join('salt-ssh', id_).rstrip(os.sep)
except AttributeError:
# Minion ID should always be a str, but don't let an int break this
cachedir = os.path.join('salt-ssh', six.text_type(id_)).rstrip(os.sep)
for saltenv in file_refs:
# Location where files in this saltenv will be cached
cache_dest_root = os.path.join(cachedir, 'files', saltenv)
file_refs[saltenv].extend(sync_refs)
env_root = os.path.join(gendir, saltenv)
if not os.path.isdir(env_root):
os.makedirs(env_root)
for ref in file_refs[saltenv]:
for name in ref:
short = salt.utils.url.parse(name)[0].lstrip('/')
cache_dest = os.path.join(cache_dest_root, short)
try:
path = file_client.cache_file(name, saltenv, cachedir=cachedir)
except IOError:
path = ''
if path:
tgt = os.path.join(env_root, short)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(path, tgt)
continue
try:
files = file_client.cache_dir(name, saltenv, cachedir=cachedir)
except IOError:
files = ''
if files:
for filename in files:
fn = filename[len(file_client.get_cachedir(cache_dest)):].strip('/')
tgt = os.path.join(
env_root,
short,
fn,
)
tgt_dir = os.path.dirname(tgt)
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
shutil.copy(filename, tgt)
continue
try:
# cwd may not exist if it was removed but salt was run from it
cwd = os.getcwd()
except OSError:
cwd = None
os.chdir(gendir)
with closing(tarfile.open(trans_tar, 'w:gz')) as tfp:
for root, dirs, files in salt.utils.path.os_walk(gendir):
for name in files:
full = os.path.join(root, name)
tfp.add(full[len(gendir):].lstrip(os.sep))
if cwd:
os.chdir(cwd)
shutil.rmtree(gendir)
return trans_tar | [
"def",
"prep_trans_tar",
"(",
"file_client",
",",
"chunks",
",",
"file_refs",
",",
"pillar",
"=",
"None",
",",
"id_",
"=",
"None",
",",
"roster_grains",
"=",
"None",
")",
":",
"gendir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"trans_tar",
"=",
"salt",... | Generate the execution package from the saltenv file refs and a low state
data structure | [
"Generate",
"the",
"execution",
"package",
"from",
"the",
"saltenv",
"file",
"refs",
"and",
"a",
"low",
"state",
"data",
"structure"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L170-L259 | train |
saltstack/salt | salt/client/ssh/state.py | SSHState.load_modules | def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions) | python | def load_modules(self, data=None, proxy=None):
'''
Load up the modules for remote compilation via ssh
'''
self.functions = self.wrapper
self.utils = salt.loader.utils(self.opts)
self.serializers = salt.loader.serializers(self.opts)
locals_ = salt.loader.minion_mods(self.opts, utils=self.utils)
self.states = salt.loader.states(self.opts, locals_, self.utils, self.serializers)
self.rend = salt.loader.render(self.opts, self.functions) | [
"def",
"load_modules",
"(",
"self",
",",
"data",
"=",
"None",
",",
"proxy",
"=",
"None",
")",
":",
"self",
".",
"functions",
"=",
"self",
".",
"wrapper",
"self",
".",
"utils",
"=",
"salt",
".",
"loader",
".",
"utils",
"(",
"self",
".",
"opts",
")",... | Load up the modules for remote compilation via ssh | [
"Load",
"up",
"the",
"modules",
"for",
"remote",
"compilation",
"via",
"ssh"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L43-L52 | train |
saltstack/salt | salt/client/ssh/state.py | SSHHighState._master_tops | def _master_tops(self):
'''
Evaluate master_tops locally
'''
if 'id' not in self.opts:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, self.opts['id']):
return {}
# Evaluate all configured master_tops interfaces
grains = {}
ret = {}
if 'grains' in self.opts:
grains = self.opts['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=self.opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function %s failed with error %s for minion %s',
fun, exc, self.opts['id']
)
return ret | python | def _master_tops(self):
'''
Evaluate master_tops locally
'''
if 'id' not in self.opts:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, self.opts['id']):
return {}
# Evaluate all configured master_tops interfaces
grains = {}
ret = {}
if 'grains' in self.opts:
grains = self.opts['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=self.opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function %s failed with error %s for minion %s',
fun, exc, self.opts['id']
)
return ret | [
"def",
"_master_tops",
"(",
"self",
")",
":",
"if",
"'id'",
"not",
"in",
"self",
".",
"opts",
":",
"log",
".",
"error",
"(",
"'Received call for external nodes without an id'",
")",
"return",
"{",
"}",
"if",
"not",
"salt",
".",
"utils",
".",
"verify",
".",... | Evaluate master_tops locally | [
"Evaluate",
"master_tops",
"locally"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/state.py#L92-L119 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | get_api_versions | def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Get a resource type api versions
'''
if kwargs is None:
kwargs = {}
if 'resource_provider' not in kwargs:
raise SaltCloudSystemExit(
'A resource_provider must be specified'
)
if 'resource_type' not in kwargs:
raise SaltCloudSystemExit(
'A resource_type must be specified'
)
api_versions = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace=kwargs['resource_provider']
)
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == kwargs['resource_type']:
resource_dict = resource.as_dict()
api_versions = resource_dict['api_versions']
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
return api_versions | python | def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Get a resource type api versions
'''
if kwargs is None:
kwargs = {}
if 'resource_provider' not in kwargs:
raise SaltCloudSystemExit(
'A resource_provider must be specified'
)
if 'resource_type' not in kwargs:
raise SaltCloudSystemExit(
'A resource_type must be specified'
)
api_versions = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace=kwargs['resource_provider']
)
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == kwargs['resource_type']:
resource_dict = resource.as_dict()
api_versions = resource_dict['api_versions']
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
return api_versions | [
"def",
"get_api_versions",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'resource_provider'",
"not",
"in",
"kwargs",
":",
"raise",
"Sal... | Get a resource type api versions | [
"Get",
"a",
"resource",
"type",
"api",
"versions"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L157-L189 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | get_resource_by_id | def get_resource_by_id(resource_id, api_version, extract_value=None):
'''
Get an AzureARM resource by id
'''
ret = {}
try:
resconn = get_conn(client_type='resource')
resource_query = resconn.resources.get_by_id(
resource_id=resource_id,
api_version=api_version
)
resource_dict = resource_query.as_dict()
if extract_value is not None:
ret = resource_dict[extract_value]
else:
ret = resource_dict
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret | python | def get_resource_by_id(resource_id, api_version, extract_value=None):
'''
Get an AzureARM resource by id
'''
ret = {}
try:
resconn = get_conn(client_type='resource')
resource_query = resconn.resources.get_by_id(
resource_id=resource_id,
api_version=api_version
)
resource_dict = resource_query.as_dict()
if extract_value is not None:
ret = resource_dict[extract_value]
else:
ret = resource_dict
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret | [
"def",
"get_resource_by_id",
"(",
"resource_id",
",",
"api_version",
",",
"extract_value",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"try",
":",
"resconn",
"=",
"get_conn",
"(",
"client_type",
"=",
"'resource'",
")",
"resource_query",
"=",
"resconn",
".",... | Get an AzureARM resource by id | [
"Get",
"an",
"AzureARM",
"resource",
"by",
"id"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L192-L213 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | get_configured_provider | def get_configured_provider():
'''
Return the first configured provider instance.
'''
def __is_provider_configured(opts, provider, required_keys=()):
'''
Check if the provider is configured.
'''
if ':' in provider:
alias, driver = provider.split(':')
if alias not in opts['providers']:
return False
if driver not in opts['providers'][alias]:
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
return False
return opts['providers'][alias][driver]
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider:
continue
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
# This provider does not include all necessary keys,
# continue to next one.
skip_provider = True
break
if skip_provider:
continue
return provider_details
return False
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'tenant', 'client_id', 'secret'),
)
if provider is False:
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'username', 'password'),
)
if provider is False:
# check if using MSI style credentials...
provider = config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
required_keys=('subscription_id',),
)
return provider | python | def get_configured_provider():
'''
Return the first configured provider instance.
'''
def __is_provider_configured(opts, provider, required_keys=()):
'''
Check if the provider is configured.
'''
if ':' in provider:
alias, driver = provider.split(':')
if alias not in opts['providers']:
return False
if driver not in opts['providers'][alias]:
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
return False
return opts['providers'][alias][driver]
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider:
continue
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
# This provider does not include all necessary keys,
# continue to next one.
skip_provider = True
break
if skip_provider:
continue
return provider_details
return False
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'tenant', 'client_id', 'secret'),
)
if provider is False:
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'username', 'password'),
)
if provider is False:
# check if using MSI style credentials...
provider = config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
required_keys=('subscription_id',),
)
return provider | [
"def",
"get_configured_provider",
"(",
")",
":",
"def",
"__is_provider_configured",
"(",
"opts",
",",
"provider",
",",
"required_keys",
"=",
"(",
")",
")",
":",
"'''\n Check if the provider is configured.\n '''",
"if",
"':'",
"in",
"provider",
":",
"alia... | Return the first configured provider instance. | [
"Return",
"the",
"first",
"configured",
"provider",
"instance",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L216-L275 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | get_conn | def get_conn(client_type):
'''
Return a connection object for a client type.
'''
conn_kwargs = {}
conn_kwargs['subscription_id'] = salt.utils.stringutils.to_str(
config.get_cloud_config_value(
'subscription_id',
get_configured_provider(), __opts__, search_global=False
)
)
cloud_env = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
if cloud_env is not None:
conn_kwargs['cloud_environment'] = cloud_env
tenant = config.get_cloud_config_value(
'tenant',
get_configured_provider(), __opts__, search_global=False
)
if tenant is not None:
client_id = config.get_cloud_config_value(
'client_id',
get_configured_provider(), __opts__, search_global=False
)
secret = config.get_cloud_config_value(
'secret',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'client_id': client_id, 'secret': secret,
'tenant': tenant})
username = config.get_cloud_config_value(
'username',
get_configured_provider(), __opts__, search_global=False
)
if username is not None:
password = config.get_cloud_config_value(
'password',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'username': username, 'password': password})
client = __utils__['azurearm.get_client'](
client_type=client_type, **conn_kwargs
)
return client | python | def get_conn(client_type):
'''
Return a connection object for a client type.
'''
conn_kwargs = {}
conn_kwargs['subscription_id'] = salt.utils.stringutils.to_str(
config.get_cloud_config_value(
'subscription_id',
get_configured_provider(), __opts__, search_global=False
)
)
cloud_env = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
if cloud_env is not None:
conn_kwargs['cloud_environment'] = cloud_env
tenant = config.get_cloud_config_value(
'tenant',
get_configured_provider(), __opts__, search_global=False
)
if tenant is not None:
client_id = config.get_cloud_config_value(
'client_id',
get_configured_provider(), __opts__, search_global=False
)
secret = config.get_cloud_config_value(
'secret',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'client_id': client_id, 'secret': secret,
'tenant': tenant})
username = config.get_cloud_config_value(
'username',
get_configured_provider(), __opts__, search_global=False
)
if username is not None:
password = config.get_cloud_config_value(
'password',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'username': username, 'password': password})
client = __utils__['azurearm.get_client'](
client_type=client_type, **conn_kwargs
)
return client | [
"def",
"get_conn",
"(",
"client_type",
")",
":",
"conn_kwargs",
"=",
"{",
"}",
"conn_kwargs",
"[",
"'subscription_id'",
"]",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"config",
".",
"get_cloud_config_value",
"(",
"'subscription_id'",
"... | Return a connection object for a client type. | [
"Return",
"a",
"connection",
"object",
"for",
"a",
"client",
"type",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L288-L342 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | get_location | def get_location(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Return the location that is configured for this provider
'''
if not kwargs:
kwargs = {}
vm_dict = get_configured_provider()
vm_dict.update(kwargs)
return config.get_cloud_config_value(
'location',
vm_dict, __opts__, search_global=False
) | python | def get_location(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Return the location that is configured for this provider
'''
if not kwargs:
kwargs = {}
vm_dict = get_configured_provider()
vm_dict.update(kwargs)
return config.get_cloud_config_value(
'location',
vm_dict, __opts__, search_global=False
) | [
"def",
"get_location",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"not",
"kwargs",
":",
"kwargs",
"=",
"{",
"}",
"vm_dict",
"=",
"get_configured_provider",
"(",
")",
"vm_dict",
".",
"update",
"(... | Return the location that is configured for this provider | [
"Return",
"the",
"location",
"that",
"is",
"configured",
"for",
"this",
"provider"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L345-L356 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | avail_locations | def avail_locations(call=None):
'''
Return a dict of all available regions.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
ret['locations'] = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace='Microsoft.Compute'
)
locations = []
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == 'virtualMachines':
resource_dict = resource.as_dict()
locations = resource_dict['locations']
for location in locations:
lowercase = location.lower().replace(' ', '')
ret['locations'].append(lowercase)
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret | python | def avail_locations(call=None):
'''
Return a dict of all available regions.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
ret['locations'] = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace='Microsoft.Compute'
)
locations = []
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == 'virtualMachines':
resource_dict = resource.as_dict()
locations = resource_dict['locations']
for location in locations:
lowercase = location.lower().replace(' ', '')
ret['locations'].append(lowercase)
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret | [
"def",
"avail_locations",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_locations function must be called with '",
"'-f or --function, or with the --list-locations option'",
")",
"ret",
"=",
"{",
... | Return a dict of all available regions. | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"regions",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L359-L389 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | avail_images | def avail_images(call=None):
'''
Return a dict of all available images on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
compconn = get_conn(client_type='compute')
region = get_location()
publishers = []
ret = {}
def _get_publisher_images(publisher):
'''
Get all images from a specific publisher
'''
data = {}
try:
offers = compconn.virtual_machine_images.list_offers(
location=region,
publisher_name=publisher,
)
for offer_obj in offers:
offer = offer_obj.as_dict()
skus = compconn.virtual_machine_images.list_skus(
location=region,
publisher_name=publisher,
offer=offer['name'],
)
for sku_obj in skus:
sku = sku_obj.as_dict()
results = compconn.virtual_machine_images.list(
location=region,
publisher_name=publisher,
offer=offer['name'],
skus=sku['name'],
)
for version_obj in results:
version = version_obj.as_dict()
name = '|'.join((
publisher,
offer['name'],
sku['name'],
version['name'],
))
data[name] = {
'publisher': publisher,
'offer': offer['name'],
'sku': sku['name'],
'version': version['name'],
}
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
data = {publisher: exc.message}
return data
try:
publishers_query = compconn.virtual_machine_images.list_publishers(
location=region
)
for publisher_obj in publishers_query:
publisher = publisher_obj.as_dict()
publishers.append(publisher['name'])
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_publisher_images, publishers)
results.wait()
ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
return ret | python | def avail_images(call=None):
'''
Return a dict of all available images on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
compconn = get_conn(client_type='compute')
region = get_location()
publishers = []
ret = {}
def _get_publisher_images(publisher):
'''
Get all images from a specific publisher
'''
data = {}
try:
offers = compconn.virtual_machine_images.list_offers(
location=region,
publisher_name=publisher,
)
for offer_obj in offers:
offer = offer_obj.as_dict()
skus = compconn.virtual_machine_images.list_skus(
location=region,
publisher_name=publisher,
offer=offer['name'],
)
for sku_obj in skus:
sku = sku_obj.as_dict()
results = compconn.virtual_machine_images.list(
location=region,
publisher_name=publisher,
offer=offer['name'],
skus=sku['name'],
)
for version_obj in results:
version = version_obj.as_dict()
name = '|'.join((
publisher,
offer['name'],
sku['name'],
version['name'],
))
data[name] = {
'publisher': publisher,
'offer': offer['name'],
'sku': sku['name'],
'version': version['name'],
}
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
data = {publisher: exc.message}
return data
try:
publishers_query = compconn.virtual_machine_images.list_publishers(
location=region
)
for publisher_obj in publishers_query:
publisher = publisher_obj.as_dict()
publishers.append(publisher['name'])
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_publisher_images, publishers)
results.wait()
ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
return ret | [
"def",
"avail_images",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_images function must be called with '",
"'-f or --function, or with the --list-images option'",
")",
"compconn",
"=",
"get_conn",... | Return a dict of all available images on the provider | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"images",
"on",
"the",
"provider"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L392-L467 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | avail_sizes | def avail_sizes(call=None):
'''
Return a list of sizes available from the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
compconn = get_conn(client_type='compute')
ret = {}
location = get_location()
try:
sizes = compconn.virtual_machine_sizes.list(
location=location
)
for size_obj in sizes:
size = size_obj.as_dict()
ret[size['name']] = size
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
ret = {'Error': exc.message}
return ret | python | def avail_sizes(call=None):
'''
Return a list of sizes available from the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
compconn = get_conn(client_type='compute')
ret = {}
location = get_location()
try:
sizes = compconn.virtual_machine_sizes.list(
location=location
)
for size_obj in sizes:
size = size_obj.as_dict()
ret[size['name']] = size
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
ret = {'Error': exc.message}
return ret | [
"def",
"avail_sizes",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_sizes function must be called with '",
"'-f or --function, or with the --list-sizes option'",
")",
"compconn",
"=",
"get_conn",
... | Return a list of sizes available from the provider | [
"Return",
"a",
"list",
"of",
"sizes",
"available",
"from",
"the",
"provider"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L470-L496 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_nodes | def list_nodes(call=None):
'''
List VMs on this Azure account
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {'name': node}
for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
ret[node][prop] = nodes[node].get(prop)
return ret | python | def list_nodes(call=None):
'''
List VMs on this Azure account
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {'name': node}
for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
ret[node][prop] = nodes[node].get(prop)
return ret | [
"def",
"list_nodes",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes function must be called with -f or --function.'",
")",
"ret",
"=",
"{",
"}",
"nodes",
"=",
"list_nodes_full",
"(",
"... | List VMs on this Azure account | [
"List",
"VMs",
"on",
"this",
"Azure",
"account"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L499-L515 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_nodes_full | def list_nodes_full(call=None):
'''
List all VMs on the subscription with full information
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'networkInterfaces'
}
)
netapi_version = netapi_versions[0]
compconn = get_conn(client_type='compute')
ret = {}
def _get_node_info(node):
'''
Get node info.
'''
node_ret = {}
node['id'] = node['vm_id']
node['size'] = node['hardware_profile']['vm_size']
node['state'] = node['provisioning_state']
node['public_ips'] = []
node['private_ips'] = []
node_ret[node['name']] = node
try:
image_ref = node['storage_profile']['image_reference']
node['image'] = '|'.join([
image_ref['publisher'],
image_ref['offer'],
image_ref['sku'],
image_ref['version'],
])
except (TypeError, KeyError):
try:
node['image'] = node['storage_profile']['os_disk']['image']['uri']
except (TypeError, KeyError):
node['image'] = node.get('storage_profile', {}).get('image_reference', {}).get('id')
try:
netifaces = node['network_profile']['network_interfaces']
for index, netiface in enumerate(netifaces):
netiface_name = get_resource_by_id(
netiface['id'],
netapi_version,
'name'
)
netiface, pubips, privips = _get_network_interface(
netiface_name,
node['resource_group']
)
node['network_profile']['network_interfaces'][index].update(netiface)
node['public_ips'].extend(pubips)
node['private_ips'].extend(privips)
except Exception:
pass
node_ret[node['name']] = node
return node_ret
for group in list_resource_groups():
nodes = []
nodes_query = compconn.virtual_machines.list(
resource_group_name=group
)
for node_obj in nodes_query:
node = node_obj.as_dict()
node['resource_group'] = group
nodes.append(node)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_node_info, nodes)
results.wait()
group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
ret.update(group_ret)
return ret | python | def list_nodes_full(call=None):
'''
List all VMs on the subscription with full information
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'networkInterfaces'
}
)
netapi_version = netapi_versions[0]
compconn = get_conn(client_type='compute')
ret = {}
def _get_node_info(node):
'''
Get node info.
'''
node_ret = {}
node['id'] = node['vm_id']
node['size'] = node['hardware_profile']['vm_size']
node['state'] = node['provisioning_state']
node['public_ips'] = []
node['private_ips'] = []
node_ret[node['name']] = node
try:
image_ref = node['storage_profile']['image_reference']
node['image'] = '|'.join([
image_ref['publisher'],
image_ref['offer'],
image_ref['sku'],
image_ref['version'],
])
except (TypeError, KeyError):
try:
node['image'] = node['storage_profile']['os_disk']['image']['uri']
except (TypeError, KeyError):
node['image'] = node.get('storage_profile', {}).get('image_reference', {}).get('id')
try:
netifaces = node['network_profile']['network_interfaces']
for index, netiface in enumerate(netifaces):
netiface_name = get_resource_by_id(
netiface['id'],
netapi_version,
'name'
)
netiface, pubips, privips = _get_network_interface(
netiface_name,
node['resource_group']
)
node['network_profile']['network_interfaces'][index].update(netiface)
node['public_ips'].extend(pubips)
node['private_ips'].extend(privips)
except Exception:
pass
node_ret[node['name']] = node
return node_ret
for group in list_resource_groups():
nodes = []
nodes_query = compconn.virtual_machines.list(
resource_group_name=group
)
for node_obj in nodes_query:
node = node_obj.as_dict()
node['resource_group'] = group
nodes.append(node)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_node_info, nodes)
results.wait()
group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
ret.update(group_ret)
return ret | [
"def",
"list_nodes_full",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes_full function must be called with -f or --function.'",
")",
"netapi_versions",
"=",
"get_api_versions",
"(",
"kwargs",
... | List all VMs on the subscription with full information | [
"List",
"all",
"VMs",
"on",
"the",
"subscription",
"with",
"full",
"information"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L518-L600 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_resource_groups | def list_resource_groups(call=None):
'''
List resource groups associated with the subscription
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_hosted_services function must be called with '
'-f or --function'
)
resconn = get_conn(client_type='resource')
ret = {}
try:
groups = resconn.resource_groups.list()
for group_obj in groups:
group = group_obj.as_dict()
ret[group['name']] = group
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret | python | def list_resource_groups(call=None):
'''
List resource groups associated with the subscription
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_hosted_services function must be called with '
'-f or --function'
)
resconn = get_conn(client_type='resource')
ret = {}
try:
groups = resconn.resource_groups.list()
for group_obj in groups:
group = group_obj.as_dict()
ret[group['name']] = group
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret | [
"def",
"list_resource_groups",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_hosted_services function must be called with '",
"'-f or --function'",
")",
"resconn",
"=",
"get_conn",
"(",
"client_t... | List resource groups associated with the subscription | [
"List",
"resource",
"groups",
"associated",
"with",
"the",
"subscription"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L603-L625 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | show_instance | def show_instance(name, call=None):
'''
Show the details from AzureARM concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
try:
node = list_nodes_full('function')[name]
except KeyError:
log.debug('Failed to get data for node \'%s\'', name)
node = {}
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node | python | def show_instance(name, call=None):
'''
Show the details from AzureARM concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
try:
node = list_nodes_full('function')[name]
except KeyError:
log.debug('Failed to get data for node \'%s\'', name)
node = {}
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node | [
"def",
"show_instance",
"(",
"name",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The show_instance action must be called with -a or --action.'",
")",
"try",
":",
"node",
"=",
"list_nodes_full",
"(",... | Show the details from AzureARM concerning an instance | [
"Show",
"the",
"details",
"from",
"AzureARM",
"concerning",
"an",
"instance"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L628-L644 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | delete_interface | def delete_interface(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a network interface.
'''
if kwargs is None:
kwargs = {}
netconn = get_conn(client_type='network')
if kwargs.get('resource_group') is None:
kwargs['resource_group'] = config.get_cloud_config_value(
'resource_group', {}, __opts__, search_global=True
)
ips = []
iface = netconn.network_interfaces.get(
kwargs['resource_group'],
kwargs['iface_name'],
)
iface_name = iface.name
for ip_ in iface.ip_configurations:
ips.append(ip_.name)
poller = netconn.network_interfaces.delete(
kwargs['resource_group'],
kwargs['iface_name'],
)
poller.wait()
for ip_ in ips:
poller = netconn.public_ip_addresses.delete(kwargs['resource_group'], ip_)
poller.wait()
return {iface_name: ips} | python | def delete_interface(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a network interface.
'''
if kwargs is None:
kwargs = {}
netconn = get_conn(client_type='network')
if kwargs.get('resource_group') is None:
kwargs['resource_group'] = config.get_cloud_config_value(
'resource_group', {}, __opts__, search_global=True
)
ips = []
iface = netconn.network_interfaces.get(
kwargs['resource_group'],
kwargs['iface_name'],
)
iface_name = iface.name
for ip_ in iface.ip_configurations:
ips.append(ip_.name)
poller = netconn.network_interfaces.delete(
kwargs['resource_group'],
kwargs['iface_name'],
)
poller.wait()
for ip_ in ips:
poller = netconn.public_ip_addresses.delete(kwargs['resource_group'], ip_)
poller.wait()
return {iface_name: ips} | [
"def",
"delete_interface",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"netconn",
"=",
"get_conn",
"(",
"client_type",
"=",
"'network'",
")",... | Delete a network interface. | [
"Delete",
"a",
"network",
"interface",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L647-L680 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | _get_public_ip | def _get_public_ip(name, resource_group):
'''
Get the public ip address details by name.
'''
netconn = get_conn(client_type='network')
try:
pubip_query = netconn.public_ip_addresses.get(
resource_group_name=resource_group,
public_ip_address_name=name
)
pubip = pubip_query.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', exc.message)
pubip = {'error': exc.message}
return pubip | python | def _get_public_ip(name, resource_group):
'''
Get the public ip address details by name.
'''
netconn = get_conn(client_type='network')
try:
pubip_query = netconn.public_ip_addresses.get(
resource_group_name=resource_group,
public_ip_address_name=name
)
pubip = pubip_query.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', exc.message)
pubip = {'error': exc.message}
return pubip | [
"def",
"_get_public_ip",
"(",
"name",
",",
"resource_group",
")",
":",
"netconn",
"=",
"get_conn",
"(",
"client_type",
"=",
"'network'",
")",
"try",
":",
"pubip_query",
"=",
"netconn",
".",
"public_ip_addresses",
".",
"get",
"(",
"resource_group_name",
"=",
"r... | Get the public ip address details by name. | [
"Get",
"the",
"public",
"ip",
"address",
"details",
"by",
"name",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L683-L698 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | _get_network_interface | def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips | python | def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips | [
"def",
"_get_network_interface",
"(",
"name",
",",
"resource_group",
")",
":",
"public_ips",
"=",
"[",
"]",
"private_ips",
"=",
"[",
"]",
"netapi_versions",
"=",
"get_api_versions",
"(",
"kwargs",
"=",
"{",
"'resource_provider'",
":",
"'Microsoft.Network'",
",",
... | Get a network interface. | [
"Get",
"a",
"network",
"interface",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L701-L733 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | create_network_interface | def create_network_interface(call=None, kwargs=None):
'''
Create a network interface.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The create_network_interface action must be called with -a or --action.'
)
# pylint: disable=invalid-name
IPAllocationMethod = getattr(
network_models,
'IPAllocationMethod'
)
# pylint: disable=invalid-name
NetworkInterface = getattr(
network_models,
'NetworkInterface'
)
# pylint: disable=invalid-name
NetworkInterfaceIPConfiguration = getattr(
network_models,
'NetworkInterfaceIPConfiguration'
)
# pylint: disable=invalid-name
PublicIPAddress = getattr(
network_models,
'PublicIPAddress'
)
if not isinstance(kwargs, dict):
kwargs = {}
vm_ = kwargs
netconn = get_conn(client_type='network')
if kwargs.get('location') is None:
kwargs['location'] = get_location()
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', vm_, __opts__, search_global=False
)
if kwargs.get('subnet') is None:
kwargs['subnet'] = config.get_cloud_config_value(
'subnet', vm_, __opts__, search_global=False
)
if kwargs.get('network_resource_group') is None:
kwargs['network_resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=False
)
if kwargs.get('iface_name') is None:
kwargs['iface_name'] = '{0}-iface0'.format(vm_['name'])
try:
subnet_obj = netconn.subnets.get(
resource_group_name=kwargs['network_resource_group'],
virtual_network_name=kwargs['network'],
subnet_name=kwargs['subnet'],
)
except CloudError as exc:
raise SaltCloudSystemExit(
'{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format(
exc.message,
kwargs['network_resource_group'],
kwargs['network'],
kwargs['subnet']
)
)
ip_kwargs = {}
ip_configurations = None
if 'load_balancer_backend_address_pools' in kwargs:
pool_dicts = kwargs['load_balancer_backend_address_pools']
if isinstance(pool_dicts, dict):
pool_ids = []
for load_bal, be_pools in pool_dicts.items():
for pool in be_pools:
try:
lbbep_data = netconn.load_balancer_backend_address_pools.get(
kwargs['resource_group'],
load_bal,
pool,
)
pool_ids.append({'id': lbbep_data.as_dict()['id']})
except CloudError as exc:
log.error('There was a cloud error: %s', six.text_type(exc))
except KeyError as exc:
log.error('There was an error getting the Backend Pool ID: %s', six.text_type(exc))
ip_kwargs['load_balancer_backend_address_pools'] = pool_ids
if 'private_ip_address' in kwargs.keys():
ip_kwargs['private_ip_address'] = kwargs['private_ip_address']
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.static
else:
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.dynamic
if kwargs.get('allocate_public_ip') is True:
pub_ip_name = '{0}-ip'.format(kwargs['iface_name'])
poller = netconn.public_ip_addresses.create_or_update(
resource_group_name=kwargs['resource_group'],
public_ip_address_name=pub_ip_name,
parameters=PublicIPAddress(
location=kwargs['location'],
public_ip_allocation_method=IPAllocationMethod.static,
),
)
count = 0
poller.wait()
while True:
try:
pub_ip_data = netconn.public_ip_addresses.get(
kwargs['resource_group'],
pub_ip_name,
)
if pub_ip_data.ip_address: # pylint: disable=no-member
ip_kwargs['public_ip_address'] = PublicIPAddress(
id=six.text_type(pub_ip_data.id), # pylint: disable=no-member
)
ip_configurations = [
NetworkInterfaceIPConfiguration(
name='{0}-ip'.format(kwargs['iface_name']),
subnet=subnet_obj,
**ip_kwargs
)
]
break
except CloudError as exc:
log.error('There was a cloud error: %s', exc)
count += 1
if count > 120:
raise ValueError('Timed out waiting for public IP Address.')
time.sleep(5)
else:
priv_ip_name = '{0}-ip'.format(kwargs['iface_name'])
ip_configurations = [
NetworkInterfaceIPConfiguration(
name=priv_ip_name,
subnet=subnet_obj,
**ip_kwargs
)
]
network_security_group = None
if kwargs.get('security_group') is not None:
network_security_group = netconn.network_security_groups.get(
resource_group_name=kwargs['resource_group'],
network_security_group_name=kwargs['security_group'],
)
iface_params = NetworkInterface(
location=kwargs['location'],
network_security_group=network_security_group,
ip_configurations=ip_configurations,
)
poller = netconn.network_interfaces.create_or_update(
kwargs['resource_group'], kwargs['iface_name'], iface_params
)
try:
poller.wait()
except Exception as exc:
log.warning('Network interface creation could not be polled. '
'It is likely that we are reusing an existing interface. (%s)', exc)
count = 0
while True:
try:
return _get_network_interface(kwargs['iface_name'], kwargs['resource_group'])
except CloudError:
count += 1
if count > 120:
raise ValueError('Timed out waiting for operation to complete.')
time.sleep(5) | python | def create_network_interface(call=None, kwargs=None):
'''
Create a network interface.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The create_network_interface action must be called with -a or --action.'
)
# pylint: disable=invalid-name
IPAllocationMethod = getattr(
network_models,
'IPAllocationMethod'
)
# pylint: disable=invalid-name
NetworkInterface = getattr(
network_models,
'NetworkInterface'
)
# pylint: disable=invalid-name
NetworkInterfaceIPConfiguration = getattr(
network_models,
'NetworkInterfaceIPConfiguration'
)
# pylint: disable=invalid-name
PublicIPAddress = getattr(
network_models,
'PublicIPAddress'
)
if not isinstance(kwargs, dict):
kwargs = {}
vm_ = kwargs
netconn = get_conn(client_type='network')
if kwargs.get('location') is None:
kwargs['location'] = get_location()
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', vm_, __opts__, search_global=False
)
if kwargs.get('subnet') is None:
kwargs['subnet'] = config.get_cloud_config_value(
'subnet', vm_, __opts__, search_global=False
)
if kwargs.get('network_resource_group') is None:
kwargs['network_resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=False
)
if kwargs.get('iface_name') is None:
kwargs['iface_name'] = '{0}-iface0'.format(vm_['name'])
try:
subnet_obj = netconn.subnets.get(
resource_group_name=kwargs['network_resource_group'],
virtual_network_name=kwargs['network'],
subnet_name=kwargs['subnet'],
)
except CloudError as exc:
raise SaltCloudSystemExit(
'{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format(
exc.message,
kwargs['network_resource_group'],
kwargs['network'],
kwargs['subnet']
)
)
ip_kwargs = {}
ip_configurations = None
if 'load_balancer_backend_address_pools' in kwargs:
pool_dicts = kwargs['load_balancer_backend_address_pools']
if isinstance(pool_dicts, dict):
pool_ids = []
for load_bal, be_pools in pool_dicts.items():
for pool in be_pools:
try:
lbbep_data = netconn.load_balancer_backend_address_pools.get(
kwargs['resource_group'],
load_bal,
pool,
)
pool_ids.append({'id': lbbep_data.as_dict()['id']})
except CloudError as exc:
log.error('There was a cloud error: %s', six.text_type(exc))
except KeyError as exc:
log.error('There was an error getting the Backend Pool ID: %s', six.text_type(exc))
ip_kwargs['load_balancer_backend_address_pools'] = pool_ids
if 'private_ip_address' in kwargs.keys():
ip_kwargs['private_ip_address'] = kwargs['private_ip_address']
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.static
else:
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.dynamic
if kwargs.get('allocate_public_ip') is True:
pub_ip_name = '{0}-ip'.format(kwargs['iface_name'])
poller = netconn.public_ip_addresses.create_or_update(
resource_group_name=kwargs['resource_group'],
public_ip_address_name=pub_ip_name,
parameters=PublicIPAddress(
location=kwargs['location'],
public_ip_allocation_method=IPAllocationMethod.static,
),
)
count = 0
poller.wait()
while True:
try:
pub_ip_data = netconn.public_ip_addresses.get(
kwargs['resource_group'],
pub_ip_name,
)
if pub_ip_data.ip_address: # pylint: disable=no-member
ip_kwargs['public_ip_address'] = PublicIPAddress(
id=six.text_type(pub_ip_data.id), # pylint: disable=no-member
)
ip_configurations = [
NetworkInterfaceIPConfiguration(
name='{0}-ip'.format(kwargs['iface_name']),
subnet=subnet_obj,
**ip_kwargs
)
]
break
except CloudError as exc:
log.error('There was a cloud error: %s', exc)
count += 1
if count > 120:
raise ValueError('Timed out waiting for public IP Address.')
time.sleep(5)
else:
priv_ip_name = '{0}-ip'.format(kwargs['iface_name'])
ip_configurations = [
NetworkInterfaceIPConfiguration(
name=priv_ip_name,
subnet=subnet_obj,
**ip_kwargs
)
]
network_security_group = None
if kwargs.get('security_group') is not None:
network_security_group = netconn.network_security_groups.get(
resource_group_name=kwargs['resource_group'],
network_security_group_name=kwargs['security_group'],
)
iface_params = NetworkInterface(
location=kwargs['location'],
network_security_group=network_security_group,
ip_configurations=ip_configurations,
)
poller = netconn.network_interfaces.create_or_update(
kwargs['resource_group'], kwargs['iface_name'], iface_params
)
try:
poller.wait()
except Exception as exc:
log.warning('Network interface creation could not be polled. '
'It is likely that we are reusing an existing interface. (%s)', exc)
count = 0
while True:
try:
return _get_network_interface(kwargs['iface_name'], kwargs['resource_group'])
except CloudError:
count += 1
if count > 120:
raise ValueError('Timed out waiting for operation to complete.')
time.sleep(5) | [
"def",
"create_network_interface",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"call",
"!=",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The create_network_interface action must be called with -a or --action.'",
")",
"# pylint: disabl... | Create a network interface. | [
"Create",
"a",
"network",
"interface",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L736-L913 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | request_instance | def request_instance(vm_):
'''
Request a VM from Azure.
'''
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
CachingTypes = getattr(
compute_models, 'CachingTypes'
)
# pylint: disable=invalid-name
DataDisk = getattr(
compute_models, 'DataDisk'
)
# pylint: disable=invalid-name
DiskCreateOptionTypes = getattr(
compute_models, 'DiskCreateOptionTypes'
)
# pylint: disable=invalid-name
HardwareProfile = getattr(
compute_models, 'HardwareProfile'
)
# pylint: disable=invalid-name
ImageReference = getattr(
compute_models, 'ImageReference'
)
# pylint: disable=invalid-name
LinuxConfiguration = getattr(
compute_models, 'LinuxConfiguration'
)
# pylint: disable=invalid-name
SshConfiguration = getattr(
compute_models, 'SshConfiguration'
)
# pylint: disable=invalid-name
SshPublicKey = getattr(
compute_models, 'SshPublicKey'
)
# pylint: disable=invalid-name
NetworkInterfaceReference = getattr(
compute_models, 'NetworkInterfaceReference'
)
# pylint: disable=invalid-name
NetworkProfile = getattr(
compute_models, 'NetworkProfile'
)
# pylint: disable=invalid-name
OSDisk = getattr(
compute_models, 'OSDisk'
)
# pylint: disable=invalid-name
OSProfile = getattr(
compute_models, 'OSProfile'
)
# pylint: disable=invalid-name
StorageProfile = getattr(
compute_models, 'StorageProfile'
)
# pylint: disable=invalid-name
VirtualHardDisk = getattr(
compute_models, 'VirtualHardDisk'
)
# pylint: disable=invalid-name
VirtualMachine = getattr(
compute_models, 'VirtualMachine'
)
# pylint: disable=invalid-name
VirtualMachineSizeTypes = getattr(
compute_models, 'VirtualMachineSizeTypes'
)
subscription_id = config.get_cloud_config_value(
'subscription_id',
get_configured_provider(), __opts__, search_global=False
)
if vm_.get('driver') is None:
vm_['driver'] = 'azurearm'
if vm_.get('location') is None:
vm_['location'] = get_location()
if vm_.get('resource_group') is None:
vm_['resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=True
)
if vm_.get('name') is None:
vm_['name'] = config.get_cloud_config_value(
'name', vm_, __opts__, search_global=True
)
# pylint: disable=unused-variable
iface_data, public_ips, private_ips = create_network_interface(
call='action',
kwargs=vm_
)
vm_['iface_id'] = iface_data['id']
disk_name = '{0}-vol0'.format(vm_['name'])
vm_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, search_global=True,
default=config.get_cloud_config_value(
'win_username', vm_, __opts__, search_global=True
)
)
ssh_publickeyfile_contents = None
ssh_publickeyfile = config.get_cloud_config_value(
'ssh_publickeyfile',
vm_,
__opts__,
search_global=False,
default=None
)
if ssh_publickeyfile is not None:
try:
with salt.utils.files.fopen(ssh_publickeyfile, 'r') as spkc_:
ssh_publickeyfile_contents = spkc_.read()
except Exception as exc:
raise SaltCloudConfigError(
"Failed to read ssh publickey file '{0}': "
"{1}".format(ssh_publickeyfile,
exc.args[-1])
)
disable_password_authentication = config.get_cloud_config_value(
'disable_password_authentication',
vm_,
__opts__,
search_global=False,
default=False
)
vm_password = salt.utils.stringutils.to_str(
config.get_cloud_config_value(
'ssh_password', vm_, __opts__, search_global=True,
default=config.get_cloud_config_value(
'win_password', vm_, __opts__, search_global=True
)
)
)
os_kwargs = {}
win_installer = config.get_cloud_config_value(
'win_installer', vm_, __opts__, search_global=True
)
if not win_installer and ssh_publickeyfile_contents is not None:
sshpublickey = SshPublicKey(
key_data=ssh_publickeyfile_contents,
path='/home/{0}/.ssh/authorized_keys'.format(vm_username),
)
sshconfiguration = SshConfiguration(
public_keys=[sshpublickey],
)
linuxconfiguration = LinuxConfiguration(
disable_password_authentication=disable_password_authentication,
ssh=sshconfiguration,
)
os_kwargs['linux_configuration'] = linuxconfiguration
if win_installer or (vm_password is not None and not disable_password_authentication):
if not isinstance(vm_password, str):
raise SaltCloudSystemExit(
'The admin password must be a string.'
)
if len(vm_password) < 8 or len(vm_password) > 123:
raise SaltCloudSystemExit(
'The admin password must be between 8-123 characters long.'
)
complexity = 0
if any(char.isdigit() for char in vm_password):
complexity += 1
if any(char.isupper() for char in vm_password):
complexity += 1
if any(char.islower() for char in vm_password):
complexity += 1
if any(char in string.punctuation for char in vm_password):
complexity += 1
if complexity < 3:
raise SaltCloudSystemExit(
'The admin password must contain at least 3 of the following types: '
'upper, lower, digits, special characters'
)
os_kwargs['admin_password'] = vm_password
availability_set = config.get_cloud_config_value(
'availability_set',
vm_,
__opts__,
search_global=False,
default=None
)
if availability_set is not None and isinstance(availability_set, six.string_types):
availability_set = {
'id': '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/availabilitySets/{2}'.format(
subscription_id,
vm_['resource_group'],
availability_set
)
}
else:
availability_set = None
cloud_env = _get_cloud_environment()
storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint
if isinstance(vm_.get('volumes'), six.string_types):
volumes = salt.utils.yaml.safe_load(vm_['volumes'])
else:
volumes = vm_.get('volumes')
data_disks = None
if isinstance(volumes, list):
data_disks = []
else:
volumes = []
lun = 0
luns = []
for volume in volumes:
if isinstance(volume, six.string_types):
volume = {'name': volume}
volume.setdefault(
'name',
volume.get(
'name',
volume.get(
'name',
'{0}-datadisk{1}'.format(vm_['name'], six.text_type(lun))
)
)
)
volume.setdefault(
'disk_size_gb',
volume.get(
'logical_disk_size_in_gb',
volume.get('size', 100)
)
)
# Old kwarg was host_caching, new name is caching
volume.setdefault('caching', volume.get('host_caching', 'ReadOnly'))
while lun in luns:
lun += 1
if lun > 15:
log.error('Maximum lun count has been reached')
break
volume.setdefault('lun', lun)
lun += 1
# The default vhd is {vm_name}-datadisk{lun}.vhd
if 'media_link' in volume:
volume['vhd'] = VirtualHardDisk(volume['media_link'])
del volume['media_link']
elif volume.get('vhd') == 'unmanaged':
volume['vhd'] = VirtualHardDisk(
'https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd'.format(
vm_['storage_account'],
storage_endpoint_suffix,
vm_['name'],
volume['lun'],
),
)
elif 'vhd' in volume:
volume['vhd'] = VirtualHardDisk(volume['vhd'])
if 'image' in volume:
volume['create_option'] = 'from_image'
elif 'attach' in volume:
volume['create_option'] = 'attach'
else:
volume['create_option'] = 'empty'
data_disks.append(DataDisk(**volume))
img_ref = None
if vm_['image'].startswith('http') or vm_.get('vhd') == 'unmanaged':
if vm_['image'].startswith('http'):
source_image = VirtualHardDisk(vm_['image'])
else:
source_image = None
if '|' in vm_['image']:
img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
img_ref = ImageReference(
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
elif vm_['image'].startswith('/subscriptions'):
img_ref = ImageReference(id=vm_['image'])
if win_installer:
os_type = 'Windows'
else:
os_type = 'Linux'
os_disk = OSDisk(
caching=CachingTypes.none,
create_option=DiskCreateOptionTypes.from_image,
name=disk_name,
vhd=VirtualHardDisk(
'https://{0}.blob.{1}/vhds/{2}.vhd'.format(
vm_['storage_account'],
storage_endpoint_suffix,
disk_name,
),
),
os_type=os_type,
image=source_image,
disk_size_gb=vm_.get('os_disk_size_gb')
)
else:
source_image = None
os_type = None
os_disk = OSDisk(
create_option=DiskCreateOptionTypes.from_image,
disk_size_gb=vm_.get('os_disk_size_gb')
)
if '|' in vm_['image']:
img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
img_ref = ImageReference(
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
elif vm_['image'].startswith('/subscriptions'):
img_ref = ImageReference(id=vm_['image'])
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
userdata_template = config.get_cloud_config_value(
'userdata_template', vm_, __opts__, search_global=False, default=None
)
if userdata_file:
if os.path.exists(userdata_file):
with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
if userdata and userdata_template:
userdata_sendkeys = config.get_cloud_config_value(
'userdata_sendkeys', vm_, __opts__, search_global=False, default=None
)
if userdata_sendkeys:
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
key_id = vm_.get('name')
if 'append_domain' in vm_:
key_id = '.'.join([key_id, vm_['append_domain']])
salt.utils.cloud.accept_key(
__opts__['pki_dir'], vm_['pub_key'], key_id
)
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
custom_extension = None
if userdata is not None or userdata_file is not None:
try:
if win_installer:
publisher = 'Microsoft.Compute'
virtual_machine_extension_type = 'CustomScriptExtension'
type_handler_version = '1.8'
if userdata_file and userdata_file.endswith('.ps1'):
command_prefix = 'powershell -ExecutionPolicy Unrestricted -File '
else:
command_prefix = ''
else:
publisher = 'Microsoft.Azure.Extensions'
virtual_machine_extension_type = 'CustomScript'
type_handler_version = '2.0'
command_prefix = ''
settings = {}
if userdata:
settings['commandToExecute'] = userdata
elif userdata_file.startswith('http'):
settings['fileUris'] = [userdata_file]
settings['commandToExecute'] = command_prefix + './' + userdata_file[userdata_file.rfind('/')+1:]
custom_extension = {
'resource_group': vm_['resource_group'],
'virtual_machine_name': vm_['name'],
'extension_name': vm_['name'] + '_custom_userdata_script',
'location': vm_['location'],
'publisher': publisher,
'virtual_machine_extension_type': virtual_machine_extension_type,
'type_handler_version': type_handler_version,
'auto_upgrade_minor_version': True,
'settings': settings,
'protected_settings': None
}
except Exception as exc:
log.exception('Failed to encode userdata: %s', exc)
params = VirtualMachine(
location=vm_['location'],
plan=None,
hardware_profile=HardwareProfile(
vm_size=getattr(
VirtualMachineSizeTypes, vm_['size'].lower()
),
),
storage_profile=StorageProfile(
os_disk=os_disk,
data_disks=data_disks,
image_reference=img_ref,
),
os_profile=OSProfile(
admin_username=vm_username,
computer_name=vm_['name'],
**os_kwargs
),
network_profile=NetworkProfile(
network_interfaces=[
NetworkInterfaceReference(vm_['iface_id']),
],
),
availability_set=availability_set,
)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'requesting',
vm_,
['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
vm_create = compconn.virtual_machines.create_or_update(
resource_group_name=vm_['resource_group'],
vm_name=vm_['name'],
parameters=params
)
vm_create.wait()
vm_result = vm_create.result()
vm_result = vm_result.as_dict()
if custom_extension:
create_or_update_vmextension(kwargs=custom_extension)
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
vm_result = {}
def request_instance(vm_):
    '''
    Request a VM from Azure.

    Builds the Azure ARM ``VirtualMachine`` parameter object from the VM
    profile dict ``vm_`` (network interface, OS/data disks, OS profile with
    SSH key and/or admin password, availability set, optional custom-script
    extension for userdata) and submits the ``create_or_update`` call.

    vm_
        The VM profile/config dict. Mutated in place: ``driver``,
        ``location``, ``resource_group``, ``name`` and ``iface_id`` are
        filled in if missing.

    Returns the created VM as a plain dict, or ``{}`` if the Azure call
    raised a ``CloudError``.
    '''
    compconn = get_conn(client_type='compute')

    # Resolve SDK model classes lazily from compute_models so the module
    # works across azure-mgmt-compute API versions.
    # pylint: disable=invalid-name
    CachingTypes = getattr(compute_models, 'CachingTypes')
    DataDisk = getattr(compute_models, 'DataDisk')
    DiskCreateOptionTypes = getattr(compute_models, 'DiskCreateOptionTypes')
    HardwareProfile = getattr(compute_models, 'HardwareProfile')
    ImageReference = getattr(compute_models, 'ImageReference')
    LinuxConfiguration = getattr(compute_models, 'LinuxConfiguration')
    SshConfiguration = getattr(compute_models, 'SshConfiguration')
    SshPublicKey = getattr(compute_models, 'SshPublicKey')
    NetworkInterfaceReference = getattr(compute_models, 'NetworkInterfaceReference')
    NetworkProfile = getattr(compute_models, 'NetworkProfile')
    OSDisk = getattr(compute_models, 'OSDisk')
    OSProfile = getattr(compute_models, 'OSProfile')
    StorageProfile = getattr(compute_models, 'StorageProfile')
    VirtualHardDisk = getattr(compute_models, 'VirtualHardDisk')
    VirtualMachine = getattr(compute_models, 'VirtualMachine')
    VirtualMachineSizeTypes = getattr(compute_models, 'VirtualMachineSizeTypes')
    # pylint: enable=invalid-name

    subscription_id = config.get_cloud_config_value(
        'subscription_id',
        get_configured_provider(), __opts__, search_global=False
    )

    # Fill in required vm_ keys that the caller may have omitted.
    if vm_.get('driver') is None:
        vm_['driver'] = 'azurearm'
    if vm_.get('location') is None:
        vm_['location'] = get_location()
    if vm_.get('resource_group') is None:
        vm_['resource_group'] = config.get_cloud_config_value(
            'resource_group', vm_, __opts__, search_global=True
        )
    if vm_.get('name') is None:
        vm_['name'] = config.get_cloud_config_value(
            'name', vm_, __opts__, search_global=True
        )

    # pylint: disable=unused-variable
    iface_data, public_ips, private_ips = create_network_interface(
        call='action',
        kwargs=vm_
    )
    vm_['iface_id'] = iface_data['id']

    disk_name = '{0}-vol0'.format(vm_['name'])

    # Linux profiles use ssh_username; Windows profiles fall back to
    # win_username.
    vm_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, search_global=True,
        default=config.get_cloud_config_value(
            'win_username', vm_, __opts__, search_global=True
        )
    )

    ssh_publickeyfile_contents = None
    ssh_publickeyfile = config.get_cloud_config_value(
        'ssh_publickeyfile',
        vm_,
        __opts__,
        search_global=False,
        default=None
    )
    if ssh_publickeyfile is not None:
        try:
            with salt.utils.files.fopen(ssh_publickeyfile, 'r') as spkc_:
                ssh_publickeyfile_contents = spkc_.read()
        except Exception as exc:
            raise SaltCloudConfigError(
                "Failed to read ssh publickey file '{0}': "
                "{1}".format(ssh_publickeyfile,
                             exc.args[-1])
            )

    disable_password_authentication = config.get_cloud_config_value(
        'disable_password_authentication',
        vm_,
        __opts__,
        search_global=False,
        default=False
    )

    vm_password = salt.utils.stringutils.to_str(
        config.get_cloud_config_value(
            'ssh_password', vm_, __opts__, search_global=True,
            default=config.get_cloud_config_value(
                'win_password', vm_, __opts__, search_global=True
            )
        )
    )

    os_kwargs = {}
    win_installer = config.get_cloud_config_value(
        'win_installer', vm_, __opts__, search_global=True
    )

    # Linux VM with an SSH public key: attach it via LinuxConfiguration.
    if not win_installer and ssh_publickeyfile_contents is not None:
        sshpublickey = SshPublicKey(
            key_data=ssh_publickeyfile_contents,
            path='/home/{0}/.ssh/authorized_keys'.format(vm_username),
        )
        sshconfiguration = SshConfiguration(
            public_keys=[sshpublickey],
        )
        linuxconfiguration = LinuxConfiguration(
            disable_password_authentication=disable_password_authentication,
            ssh=sshconfiguration,
        )
        os_kwargs['linux_configuration'] = linuxconfiguration

    # Password auth: enforce Azure's admin-password policy locally so bad
    # passwords fail fast instead of at the ARM API.
    if win_installer or (vm_password is not None and not disable_password_authentication):
        if not isinstance(vm_password, str):
            raise SaltCloudSystemExit(
                'The admin password must be a string.'
            )
        if len(vm_password) < 8 or len(vm_password) > 123:
            raise SaltCloudSystemExit(
                'The admin password must be between 8-123 characters long.'
            )
        complexity = 0
        if any(char.isdigit() for char in vm_password):
            complexity += 1
        if any(char.isupper() for char in vm_password):
            complexity += 1
        if any(char.islower() for char in vm_password):
            complexity += 1
        if any(char in string.punctuation for char in vm_password):
            complexity += 1
        if complexity < 3:
            raise SaltCloudSystemExit(
                'The admin password must contain at least 3 of the following types: '
                'upper, lower, digits, special characters'
            )
        os_kwargs['admin_password'] = vm_password

    availability_set = config.get_cloud_config_value(
        'availability_set',
        vm_,
        __opts__,
        search_global=False,
        default=None
    )
    # A string availability set is expanded to a full ARM resource id dict;
    # any other (non-string) value is discarded.
    if availability_set is not None and isinstance(availability_set, six.string_types):
        availability_set = {
            'id': '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/availabilitySets/{2}'.format(
                subscription_id,
                vm_['resource_group'],
                availability_set
            )
        }
    else:
        availability_set = None

    cloud_env = _get_cloud_environment()
    storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint

    # 'volumes' may be given inline as a YAML string.
    if isinstance(vm_.get('volumes'), six.string_types):
        volumes = salt.utils.yaml.safe_load(vm_['volumes'])
    else:
        volumes = vm_.get('volumes')

    data_disks = None
    if isinstance(volumes, list):
        data_disks = []
    else:
        volumes = []

    lun = 0
    luns = []
    for volume in volumes:
        if isinstance(volume, six.string_types):
            volume = {'name': volume}

        # setdefault only applies the default when 'name' is absent, so a
        # single generated default is equivalent to the old nested lookups.
        volume.setdefault(
            'name',
            '{0}-datadisk{1}'.format(vm_['name'], six.text_type(lun))
        )

        volume.setdefault(
            'disk_size_gb',
            volume.get(
                'logical_disk_size_in_gb',
                volume.get('size', 100)
            )
        )
        # Old kwarg was host_caching, new name is caching
        volume.setdefault('caching', volume.get('host_caching', 'ReadOnly'))

        # NOTE(review): 'luns' is never appended to here, so this loop only
        # guards against values already in it (currently none) — confirm
        # intended behavior before relying on lun de-duplication.
        while lun in luns:
            lun += 1
            if lun > 15:
                log.error('Maximum lun count has been reached')
                break

        volume.setdefault('lun', lun)
        lun += 1
        # The default vhd is {vm_name}-datadisk{lun}.vhd
        if 'media_link' in volume:
            volume['vhd'] = VirtualHardDisk(volume['media_link'])
            del volume['media_link']
        elif volume.get('vhd') == 'unmanaged':
            volume['vhd'] = VirtualHardDisk(
                'https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd'.format(
                    vm_['storage_account'],
                    storage_endpoint_suffix,
                    vm_['name'],
                    volume['lun'],
                ),
            )
        elif 'vhd' in volume:
            volume['vhd'] = VirtualHardDisk(volume['vhd'])

        if 'image' in volume:
            volume['create_option'] = 'from_image'
        elif 'attach' in volume:
            volume['create_option'] = 'attach'
        else:
            volume['create_option'] = 'empty'
        data_disks.append(DataDisk(**volume))

    img_ref = None
    if vm_['image'].startswith('http') or vm_.get('vhd') == 'unmanaged':
        # Unmanaged-disk path: OS disk lives in a storage-account blob.
        if vm_['image'].startswith('http'):
            source_image = VirtualHardDisk(vm_['image'])
        else:
            source_image = None
            if '|' in vm_['image']:
                img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
                img_ref = ImageReference(
                    publisher=img_pub,
                    offer=img_off,
                    sku=img_sku,
                    version=img_ver,
                )
            elif vm_['image'].startswith('/subscriptions'):
                img_ref = ImageReference(id=vm_['image'])
        if win_installer:
            os_type = 'Windows'
        else:
            os_type = 'Linux'
        os_disk = OSDisk(
            caching=CachingTypes.none,
            create_option=DiskCreateOptionTypes.from_image,
            name=disk_name,
            vhd=VirtualHardDisk(
                'https://{0}.blob.{1}/vhds/{2}.vhd'.format(
                    vm_['storage_account'],
                    storage_endpoint_suffix,
                    disk_name,
                ),
            ),
            os_type=os_type,
            image=source_image,
            disk_size_gb=vm_.get('os_disk_size_gb')
        )
    else:
        # Managed-disk path: image given as 'publisher|offer|sku|version'
        # or a full /subscriptions/... resource id.
        source_image = None
        os_type = None
        os_disk = OSDisk(
            create_option=DiskCreateOptionTypes.from_image,
            disk_size_gb=vm_.get('os_disk_size_gb')
        )
        if '|' in vm_['image']:
            img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
            img_ref = ImageReference(
                publisher=img_pub,
                offer=img_off,
                sku=img_sku,
                version=img_ver,
            )
        elif vm_['image'].startswith('/subscriptions'):
            img_ref = ImageReference(id=vm_['image'])

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False, default=None
    )
    userdata = config.get_cloud_config_value(
        'userdata', vm_, __opts__, search_global=False, default=None
    )
    userdata_template = config.get_cloud_config_value(
        'userdata_template', vm_, __opts__, search_global=False, default=None
    )

    # A local userdata file overrides inline userdata.
    if userdata_file:
        if os.path.exists(userdata_file):
            with salt.utils.files.fopen(userdata_file, 'r') as fh_:
                userdata = fh_.read()

    if userdata and userdata_template:
        userdata_sendkeys = config.get_cloud_config_value(
            'userdata_sendkeys', vm_, __opts__, search_global=False, default=None
        )
        if userdata_sendkeys:
            # Pre-generate and pre-accept the minion key so the rendered
            # userdata template can embed it.
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    __opts__
                )
            )
            key_id = vm_.get('name')
            if 'append_domain' in vm_:
                key_id = '.'.join([key_id, vm_['append_domain']])
            salt.utils.cloud.accept_key(
                __opts__['pki_dir'], vm_['pub_key'], key_id
            )
        userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)

    custom_extension = None
    if userdata is not None or userdata_file is not None:
        try:
            # Userdata is delivered via the Azure custom-script extension;
            # Windows and Linux use different publishers/handlers.
            if win_installer:
                publisher = 'Microsoft.Compute'
                virtual_machine_extension_type = 'CustomScriptExtension'
                type_handler_version = '1.8'
                if userdata_file and userdata_file.endswith('.ps1'):
                    command_prefix = 'powershell -ExecutionPolicy Unrestricted -File '
                else:
                    command_prefix = ''
            else:
                publisher = 'Microsoft.Azure.Extensions'
                virtual_machine_extension_type = 'CustomScript'
                type_handler_version = '2.0'
                command_prefix = ''

            settings = {}
            if userdata:
                settings['commandToExecute'] = userdata
            elif userdata_file.startswith('http'):
                settings['fileUris'] = [userdata_file]
                settings['commandToExecute'] = command_prefix + './' + userdata_file[userdata_file.rfind('/')+1:]

            custom_extension = {
                'resource_group': vm_['resource_group'],
                'virtual_machine_name': vm_['name'],
                'extension_name': vm_['name'] + '_custom_userdata_script',
                'location': vm_['location'],
                'publisher': publisher,
                'virtual_machine_extension_type': virtual_machine_extension_type,
                'type_handler_version': type_handler_version,
                'auto_upgrade_minor_version': True,
                'settings': settings,
                'protected_settings': None
            }
        except Exception as exc:
            log.exception('Failed to encode userdata: %s', exc)

    params = VirtualMachine(
        location=vm_['location'],
        plan=None,
        hardware_profile=HardwareProfile(
            vm_size=getattr(
                VirtualMachineSizeTypes, vm_['size'].lower()
            ),
        ),
        storage_profile=StorageProfile(
            os_disk=os_disk,
            data_disks=data_disks,
            image_reference=img_ref,
        ),
        os_profile=OSProfile(
            admin_username=vm_username,
            computer_name=vm_['name'],
            **os_kwargs
        ),
        network_profile=NetworkProfile(
            network_interfaces=[
                NetworkInterfaceReference(vm_['iface_id']),
            ],
        ),
        availability_set=availability_set,
    )

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'requesting',
            vm_,
            ['name', 'profile', 'provider', 'driver']
        ),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    try:
        vm_create = compconn.virtual_machines.create_or_update(
            resource_group_name=vm_['resource_group'],
            vm_name=vm_['name'],
            parameters=params
        )
        vm_create.wait()
        vm_result = vm_create.result()
        vm_result = vm_result.as_dict()
        if custom_extension:
            create_or_update_vmextension(kwargs=custom_extension)
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('compute', exc.message)
        vm_result = {}

    return vm_result
"def",
"request_instance",
"(",
"vm_",
")",
":",
"compconn",
"=",
"get_conn",
"(",
"client_type",
"=",
"'compute'",
")",
"# pylint: disable=invalid-name",
"CachingTypes",
"=",
"getattr",
"(",
"compute_models",
",",
"'CachingTypes'",
")",
"# pylint: disable=invalid-name"... | Request a VM from Azure. | [
"Request",
"a",
"VM",
"from",
"Azure",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L916-L1377 | train |
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires the standard salt-cloud lifecycle events, requests the instance
    via :func:`request_instance`, waits for a bootstrap IP on the configured
    interface, bootstraps the minion, and returns the bootstrap result
    merged with the final instance data.

    vm_
        The VM profile/config dict. Mutated in place (``bootstrap_interface``,
        ``location``, ``ssh_host``, ``ssh_username``, ``password``).

    Raises ``SaltCloudSystemExit`` if the instance request fails or no IP
    becomes available within the configured timeout.
    '''
    try:
        # Bail out early if the profile is present but mis-configured.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'azurearm',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if vm_.get('bootstrap_interface') is None:
        vm_['bootstrap_interface'] = 'public'

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']
        ),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'azurearm', vm_['driver']
    )

    if not vm_.get('location'):
        vm_['location'] = get_location(kwargs=vm_)

    log.info('Creating Cloud VM %s in %s', vm_['name'], vm_['location'])

    vm_request = request_instance(vm_=vm_)

    if not vm_request or 'error' in vm_request:
        err_message = 'Error creating VM {0}! ({1})'.format(vm_['name'], six.text_type(vm_request))
        log.error(err_message)
        raise SaltCloudSystemExit(err_message)

    def _query_node_data(name, bootstrap_interface):
        '''
        Poll helper for wait_for_ip: return the bootstrap IP address once
        the node reports one, else False.
        '''
        data = show_instance(name, call='action')
        if not data:
            return False
        ip_address = None
        if bootstrap_interface == 'public':
            ip_address = data['public_ips'][0]
        if bootstrap_interface == 'private':
            ip_address = data['private_ips'][0]
        if ip_address is None:
            return False
        return ip_address

    try:
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(vm_['name'], vm_['bootstrap_interface'],),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value(
                'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
        )
    except (
        SaltCloudExecutionTimeout,
        SaltCloudExecutionFailure,
        SaltCloudSystemExit
    ) as exc:
        # Log the failure, then always re-raise as a system exit.
        try:
            log.warning(exc)
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    vm_['ssh_host'] = data
    if not vm_.get('ssh_username'):
        vm_['ssh_username'] = config.get_cloud_config_value(
            'ssh_username', vm_, __opts__
        )
    vm_['password'] = config.get_cloud_config_value(
        'ssh_password', vm_, __opts__
    )
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    data = show_instance(vm_['name'], call='action')
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'],
        pprint.pformat(data)
    )

    ret.update(data)

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created',
            vm_, ['name', 'profile', 'provider', 'driver']
        ),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
"def",
"create",
"(",
"vm_",
")",
":",
"try",
":",
"if",
"vm_",
"[",
"'profile'",
"]",
"and",
"config",
".",
"is_profile_configured",
"(",
"__opts__",
",",
"__active_provider_name__",
"or",
"'azurearm'",
",",
"vm_",
"[",
"'profile'",
"]",
",",
"vm_",
"=",
... | Create a single VM from a data dict. | [
"Create",
"a",
"single",
"VM",
"from",
"a",
"data",
"dict",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1380-L1492 | train |
def destroy(name, call=None, kwargs=None):  # pylint: disable=unused-argument
    '''
    Destroy a VM.

    Deletes the virtual machine, then optionally cleans up the OS disk,
    data disks, and network interfaces depending on the ``cleanup_*``
    provider settings (``delete_vhd`` / ``delete_data_disks`` kwargs
    override the vhd/data-disk settings per call).

    CLI Examples:

    .. code-block:: bash

        salt-cloud -d myminion
        salt-cloud -a destroy myminion service_name=myservice
    '''
    if kwargs is None:
        kwargs = {}

    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    compconn = get_conn(client_type='compute')

    node_data = show_instance(name, call='action')
    # Managed disks carry a resource id; unmanaged disks carry a blob URI.
    if node_data['storage_profile']['os_disk'].get('managed_disk'):
        vhd = node_data['storage_profile']['os_disk']['managed_disk']['id']
    else:
        vhd = node_data['storage_profile']['os_disk']['vhd']['uri']

    ret = {name: {}}
    log.debug('Deleting VM')
    result = compconn.virtual_machines.delete(node_data['resource_group'], name)
    result.wait()

    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](
            name,
            __active_provider_name__.split(':')[0],
            __opts__
        )

    cleanup_disks = config.get_cloud_config_value(
        'cleanup_disks',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=False,
    )

    if cleanup_disks:
        cleanup_vhds = kwargs.get(
            'delete_vhd',
            config.get_cloud_config_value(
                'cleanup_vhds',
                get_configured_provider(),
                __opts__,
                search_global=False,
                default=False
            )
        )

        if cleanup_vhds:
            log.debug('Deleting vhd')

            comps = vhd.split('/')
            container = comps[-2]
            blob = comps[-1]

            ret[name]['delete_disk'] = {
                'delete_disks': cleanup_disks,
                'delete_vhd': cleanup_vhds,
                'container': container,
                'blob': blob,
            }

            if vhd.startswith('http'):
                ret[name]['data'] = delete_blob(
                    kwargs={'container': container, 'blob': blob},
                    call='function'
                )
            else:
                ret[name]['data'] = delete_managed_disk(
                    kwargs={'resource_group': node_data['resource_group'],
                            'container': container,
                            'blob': blob},
                    call='function'
                )

        cleanup_data_disks = kwargs.get(
            'delete_data_disks',
            config.get_cloud_config_value(
                'cleanup_data_disks',
                get_configured_provider(),
                __opts__,
                search_global=False,
                default=False
            )
        )

        if cleanup_data_disks:
            log.debug('Deleting data_disks')
            ret[name]['data_disks'] = {}

            for disk in node_data['storage_profile']['data_disks']:
                datavhd = disk.get('managed_disk', {}).get('id') or disk.get('vhd', {}).get('uri')
                comps = datavhd.split('/')
                container = comps[-2]
                blob = comps[-1]

                ret[name]['data_disks'][disk['name']] = {
                    'delete_disks': cleanup_disks,
                    'delete_vhd': cleanup_vhds,
                    'container': container,
                    'blob': blob,
                }

                # NOTE(review): each data-disk deletion overwrites
                # ret[name]['data'] (and the OS-disk result above) — only the
                # last result survives. Preserved for backward compatibility;
                # confirm whether per-disk results were intended.
                if datavhd.startswith('http'):
                    ret[name]['data'] = delete_blob(
                        kwargs={'container': container, 'blob': blob},
                        call='function'
                    )
                else:
                    ret[name]['data'] = delete_managed_disk(
                        kwargs={'resource_group': node_data['resource_group'],
                                'container': container,
                                'blob': blob},
                        call='function'
                    )

    cleanup_interfaces = config.get_cloud_config_value(
        'cleanup_interfaces',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=False
    )

    if cleanup_interfaces:
        ret[name]['cleanup_network'] = {
            'cleanup_interfaces': cleanup_interfaces,
            'resource_group': node_data['resource_group'],
            'data': [],
        }
        ifaces = node_data['network_profile']['network_interfaces']
        for iface in ifaces:
            # The interface may live in a different resource group; take it
            # from the interface's own resource id.
            resource_group = iface['id'].split('/')[4]
            ret[name]['cleanup_network']['data'].append(
                delete_interface(
                    kwargs={
                        'resource_group': resource_group,
                        'iface_name': iface['name'],
                    },
                    call='function',
                )
            )

    return ret
"def",
"destroy",
"(",
"name",
",",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"call",
"==",
"'function'",
":",
"raise",
"SaltCloudSystem... | Destroy a VM.
CLI Examples:
.. code-block:: bash
salt-cloud -d myminion
salt-cloud -a destroy myminion service_name=myservice | [
"Destroy",
"a",
"VM",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1495-L1651 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_storage_accounts | def list_storage_accounts(call=None):
'''
List storage accounts within the subscription.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_storage_accounts function must be called with '
'-f or --function'
)
storconn = get_conn(client_type='storage')
ret = {}
try:
accounts_query = storconn.storage_accounts.list()
accounts = __utils__['azurearm.paged_object_to_list'](accounts_query)
for account in accounts:
ret[account['name']] = account
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('storage', exc.message)
ret = {'Error': exc.message}
return ret | python | def list_storage_accounts(call=None):
'''
List storage accounts within the subscription.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_storage_accounts function must be called with '
'-f or --function'
)
storconn = get_conn(client_type='storage')
ret = {}
try:
accounts_query = storconn.storage_accounts.list()
accounts = __utils__['azurearm.paged_object_to_list'](accounts_query)
for account in accounts:
ret[account['name']] = account
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('storage', exc.message)
ret = {'Error': exc.message}
return ret | [
"def",
"list_storage_accounts",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_storage_accounts function must be called with '",
"'-f or --function'",
")",
"storconn",
"=",
"get_conn",
"(",
"clien... | List storage accounts within the subscription. | [
"List",
"storage",
"accounts",
"within",
"the",
"subscription",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1654-L1676 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | _get_cloud_environment | def _get_cloud_environment():
'''
Get the cloud environment object.
'''
cloud_environment = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
try:
cloud_env_module = importlib.import_module('msrestazure.azure_cloud')
cloud_env = getattr(cloud_env_module, cloud_environment or 'AZURE_PUBLIC_CLOUD')
except (AttributeError, ImportError):
raise SaltCloudSystemExit(
'The azure {0} cloud environment is not available.'.format(cloud_environment)
)
return cloud_env | python | def _get_cloud_environment():
'''
Get the cloud environment object.
'''
cloud_environment = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
try:
cloud_env_module = importlib.import_module('msrestazure.azure_cloud')
cloud_env = getattr(cloud_env_module, cloud_environment or 'AZURE_PUBLIC_CLOUD')
except (AttributeError, ImportError):
raise SaltCloudSystemExit(
'The azure {0} cloud environment is not available.'.format(cloud_environment)
)
return cloud_env | [
"def",
"_get_cloud_environment",
"(",
")",
":",
"cloud_environment",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'cloud_environment'",
",",
"get_configured_provider",
"(",
")",
",",
"__opts__",
",",
"search_global",
"=",
"False",
")",
"try",
":",
"cloud_env_... | Get the cloud environment object. | [
"Get",
"the",
"cloud",
"environment",
"object",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1679-L1695 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | _get_block_blob_service | def _get_block_blob_service(kwargs=None):
'''
Get the block blob storage service.
'''
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
sas_token = kwargs.get('sas_token') or config.get_cloud_config_value(
'sas_token',
get_configured_provider(), __opts__, search_global=False
)
storage_account = kwargs.get('storage_account') or config.get_cloud_config_value(
'storage_account',
get_configured_provider(), __opts__, search_global=False
)
storage_key = kwargs.get('storage_key') or config.get_cloud_config_value(
'storage_key',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if not storage_account:
raise SaltCloudSystemExit(
'A storage account must be specified'
)
if not storage_key:
storconn = get_conn(client_type='storage')
storage_keys = storconn.storage_accounts.list_keys(resource_group, storage_account)
storage_keys = {v.key_name: v.value for v in storage_keys.keys}
storage_key = next(six.itervalues(storage_keys))
cloud_env = _get_cloud_environment()
endpoint_suffix = cloud_env.suffixes.storage_endpoint
return BlockBlobService(storage_account, storage_key,
sas_token=sas_token,
endpoint_suffix=endpoint_suffix) | python | def _get_block_blob_service(kwargs=None):
'''
Get the block blob storage service.
'''
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
sas_token = kwargs.get('sas_token') or config.get_cloud_config_value(
'sas_token',
get_configured_provider(), __opts__, search_global=False
)
storage_account = kwargs.get('storage_account') or config.get_cloud_config_value(
'storage_account',
get_configured_provider(), __opts__, search_global=False
)
storage_key = kwargs.get('storage_key') or config.get_cloud_config_value(
'storage_key',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if not storage_account:
raise SaltCloudSystemExit(
'A storage account must be specified'
)
if not storage_key:
storconn = get_conn(client_type='storage')
storage_keys = storconn.storage_accounts.list_keys(resource_group, storage_account)
storage_keys = {v.key_name: v.value for v in storage_keys.keys}
storage_key = next(six.itervalues(storage_keys))
cloud_env = _get_cloud_environment()
endpoint_suffix = cloud_env.suffixes.storage_endpoint
return BlockBlobService(storage_account, storage_key,
sas_token=sas_token,
endpoint_suffix=endpoint_suffix) | [
"def",
"_get_block_blob_service",
"(",
"kwargs",
"=",
"None",
")",
":",
"resource_group",
"=",
"kwargs",
".",
"get",
"(",
"'resource_group'",
")",
"or",
"config",
".",
"get_cloud_config_value",
"(",
"'resource_group'",
",",
"get_configured_provider",
"(",
")",
","... | Get the block blob storage service. | [
"Get",
"the",
"block",
"blob",
"storage",
"service",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1698-L1741 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_blobs | def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
'''
List blobs.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
storageservice = _get_block_blob_service(kwargs)
ret = {}
try:
for blob in storageservice.list_blobs(kwargs['container']).items:
ret[blob.name] = {
'blob_type': blob.properties.blob_type,
'last_modified': blob.properties.last_modified.isoformat(),
'server_encrypted': blob.properties.server_encrypted,
}
except Exception as exc:
log.warning(six.text_type(exc))
return ret | python | def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
'''
List blobs.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
storageservice = _get_block_blob_service(kwargs)
ret = {}
try:
for blob in storageservice.list_blobs(kwargs['container']).items:
ret[blob.name] = {
'blob_type': blob.properties.blob_type,
'last_modified': blob.properties.last_modified.isoformat(),
'server_encrypted': blob.properties.server_encrypted,
}
except Exception as exc:
log.warning(six.text_type(exc))
return ret | [
"def",
"list_blobs",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'container'",
"not",
"in",
"kwargs",
":",
"raise",
"SaltCloudSystemEx... | List blobs. | [
"List",
"blobs",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1744-L1769 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | delete_blob | def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a blob from a container.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
if 'blob' not in kwargs:
raise SaltCloudSystemExit(
'A blob must be specified'
)
storageservice = _get_block_blob_service(kwargs)
storageservice.delete_blob(kwargs['container'], kwargs['blob'])
return True | python | def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a blob from a container.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
if 'blob' not in kwargs:
raise SaltCloudSystemExit(
'A blob must be specified'
)
storageservice = _get_block_blob_service(kwargs)
storageservice.delete_blob(kwargs['container'], kwargs['blob'])
return True | [
"def",
"delete_blob",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'container'",
"not",
"in",
"kwargs",
":",
"raise",
"SaltCloudSystemE... | Delete a blob from a container. | [
"Delete",
"a",
"blob",
"from",
"a",
"container",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1772-L1792 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | delete_managed_disk | def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a managed disk from a resource group.
'''
compconn = get_conn(client_type='compute')
try:
compconn.disks.delete(kwargs['resource_group'], kwargs['blob'])
except Exception as exc:
log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
return False
return True | python | def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a managed disk from a resource group.
'''
compconn = get_conn(client_type='compute')
try:
compconn.disks.delete(kwargs['resource_group'], kwargs['blob'])
except Exception as exc:
log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
return False
return True | [
"def",
"delete_managed_disk",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"compconn",
"=",
"get_conn",
"(",
"client_type",
"=",
"'compute'",
")",
"try",
":",
"compconn",
".",
"disks",
".",
"delete",
"(",... | Delete a managed disk from a resource group. | [
"Delete",
"a",
"managed",
"disk",
"from",
"a",
"resource",
"group",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1795-L1808 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_virtual_networks | def list_virtual_networks(call=None, kwargs=None):
'''
List virtual networks.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_groups = list_resource_groups()
ret = {}
for group in resource_groups:
try:
networks = netconn.virtual_networks.list(
resource_group_name=group
)
except CloudError:
networks = {}
for network_obj in networks:
network = network_obj.as_dict()
ret[network['name']] = network
ret[network['name']]['subnets'] = list_subnets(
kwargs={'resource_group': group, 'network': network['name']}
)
return ret | python | def list_virtual_networks(call=None, kwargs=None):
'''
List virtual networks.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_groups = list_resource_groups()
ret = {}
for group in resource_groups:
try:
networks = netconn.virtual_networks.list(
resource_group_name=group
)
except CloudError:
networks = {}
for network_obj in networks:
network = network_obj.as_dict()
ret[network['name']] = network
ret[network['name']]['subnets'] = list_subnets(
kwargs={'resource_group': group, 'network': network['name']}
)
return ret | [
"def",
"list_virtual_networks",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_sizes function m... | List virtual networks. | [
"List",
"virtual",
"networks",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1811-L1842 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | list_subnets | def list_subnets(call=None, kwargs=None):
'''
List subnets in a virtual network.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group and 'group' in kwargs and 'resource_group' not in kwargs:
resource_group = kwargs['group']
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', get_configured_provider(), __opts__, search_global=False
)
if 'network' not in kwargs or kwargs['network'] is None:
raise SaltCloudSystemExit(
'A "network" must be specified'
)
ret = {}
subnets = netconn.subnets.list(resource_group, kwargs['network'])
for subnet in subnets:
ret[subnet.name] = subnet.as_dict()
ret[subnet.name]['ip_configurations'] = {}
for ip_ in subnet.ip_configurations:
comps = ip_.id.split('/')
name = comps[-1]
ret[subnet.name]['ip_configurations'][name] = ip_.as_dict()
ret[subnet.name]['ip_configurations'][name]['subnet'] = subnet.name
ret[subnet.name]['resource_group'] = resource_group
return ret | python | def list_subnets(call=None, kwargs=None):
'''
List subnets in a virtual network.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group and 'group' in kwargs and 'resource_group' not in kwargs:
resource_group = kwargs['group']
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', get_configured_provider(), __opts__, search_global=False
)
if 'network' not in kwargs or kwargs['network'] is None:
raise SaltCloudSystemExit(
'A "network" must be specified'
)
ret = {}
subnets = netconn.subnets.list(resource_group, kwargs['network'])
for subnet in subnets:
ret[subnet.name] = subnet.as_dict()
ret[subnet.name]['ip_configurations'] = {}
for ip_ in subnet.ip_configurations:
comps = ip_.id.split('/')
name = comps[-1]
ret[subnet.name]['ip_configurations'][name] = ip_.as_dict()
ret[subnet.name]['ip_configurations'][name]['subnet'] = subnet.name
ret[subnet.name]['resource_group'] = resource_group
return ret | [
"def",
"list_subnets",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_sizes function must be ca... | List subnets in a virtual network. | [
"List",
"subnets",
"in",
"a",
"virtual",
"network",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1845-L1894 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | create_or_update_vmextension | def create_or_update_vmextension(call=None, kwargs=None): # pylint: disable=unused-argument
'''
.. versionadded:: 2019.2.0
Create or update a VM extension object "inside" of a VM object.
required kwargs:
.. code-block:: yaml
extension_name: myvmextension
virtual_machine_name: myvm
settings: {"commandToExecute": "hostname"}
optional kwargs:
.. code-block:: yaml
resource_group: < inferred from cloud configs >
location: < inferred from cloud configs >
publisher: < default: Microsoft.Azure.Extensions >
virtual_machine_extension_type: < default: CustomScript >
type_handler_version: < default: 2.0 >
auto_upgrade_minor_version: < default: True >
protected_settings: < default: None >
'''
if kwargs is None:
kwargs = {}
if 'extension_name' not in kwargs:
raise SaltCloudSystemExit(
'An extension name must be specified'
)
if 'virtual_machine_name' not in kwargs:
raise SaltCloudSystemExit(
'A virtual machine name must be specified'
)
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
VirtualMachineExtension = getattr(
compute_models, 'VirtualMachineExtension'
)
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
location = kwargs.get('location') or get_location()
if not location:
raise SaltCloudSystemExit(
'A location must be specified'
)
publisher = kwargs.get('publisher', 'Microsoft.Azure.Extensions')
virtual_machine_extension_type = kwargs.get('virtual_machine_extension_type', 'CustomScript')
type_handler_version = kwargs.get('type_handler_version', '2.0')
auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', True)
settings = kwargs.get('settings', {})
protected_settings = kwargs.get('protected_settings')
if not isinstance(settings, dict):
raise SaltCloudSystemExit(
'VM extension settings are not valid'
)
elif 'commandToExecute' not in settings and 'script' not in settings:
raise SaltCloudSystemExit(
'VM extension settings are not valid. Either commandToExecute or script must be specified.'
)
log.info('Creating VM extension %s', kwargs['extension_name'])
ret = {}
try:
params = VirtualMachineExtension(
location=location,
publisher=publisher,
virtual_machine_extension_type=virtual_machine_extension_type,
type_handler_version=type_handler_version,
auto_upgrade_minor_version=auto_upgrade_minor_version,
settings=settings,
protected_settings=protected_settings
)
poller = compconn.virtual_machine_extensions.create_or_update(
resource_group,
kwargs['virtual_machine_name'],
kwargs['extension_name'],
params
)
ret = poller.result()
ret = ret.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to create the VM extension: {0}'.format(exc.message))
ret = {'error': exc.message}
return ret | python | def create_or_update_vmextension(call=None, kwargs=None): # pylint: disable=unused-argument
'''
.. versionadded:: 2019.2.0
Create or update a VM extension object "inside" of a VM object.
required kwargs:
.. code-block:: yaml
extension_name: myvmextension
virtual_machine_name: myvm
settings: {"commandToExecute": "hostname"}
optional kwargs:
.. code-block:: yaml
resource_group: < inferred from cloud configs >
location: < inferred from cloud configs >
publisher: < default: Microsoft.Azure.Extensions >
virtual_machine_extension_type: < default: CustomScript >
type_handler_version: < default: 2.0 >
auto_upgrade_minor_version: < default: True >
protected_settings: < default: None >
'''
if kwargs is None:
kwargs = {}
if 'extension_name' not in kwargs:
raise SaltCloudSystemExit(
'An extension name must be specified'
)
if 'virtual_machine_name' not in kwargs:
raise SaltCloudSystemExit(
'A virtual machine name must be specified'
)
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
VirtualMachineExtension = getattr(
compute_models, 'VirtualMachineExtension'
)
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
location = kwargs.get('location') or get_location()
if not location:
raise SaltCloudSystemExit(
'A location must be specified'
)
publisher = kwargs.get('publisher', 'Microsoft.Azure.Extensions')
virtual_machine_extension_type = kwargs.get('virtual_machine_extension_type', 'CustomScript')
type_handler_version = kwargs.get('type_handler_version', '2.0')
auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', True)
settings = kwargs.get('settings', {})
protected_settings = kwargs.get('protected_settings')
if not isinstance(settings, dict):
raise SaltCloudSystemExit(
'VM extension settings are not valid'
)
elif 'commandToExecute' not in settings and 'script' not in settings:
raise SaltCloudSystemExit(
'VM extension settings are not valid. Either commandToExecute or script must be specified.'
)
log.info('Creating VM extension %s', kwargs['extension_name'])
ret = {}
try:
params = VirtualMachineExtension(
location=location,
publisher=publisher,
virtual_machine_extension_type=virtual_machine_extension_type,
type_handler_version=type_handler_version,
auto_upgrade_minor_version=auto_upgrade_minor_version,
settings=settings,
protected_settings=protected_settings
)
poller = compconn.virtual_machine_extensions.create_or_update(
resource_group,
kwargs['virtual_machine_name'],
kwargs['extension_name'],
params
)
ret = poller.result()
ret = ret.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to create the VM extension: {0}'.format(exc.message))
ret = {'error': exc.message}
return ret | [
"def",
"create_or_update_vmextension",
"(",
"call",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"# pylint: disable=unused-argument",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"'extension_name'",
"not",
"in",
"kwargs",
":",
"raise... | .. versionadded:: 2019.2.0
Create or update a VM extension object "inside" of a VM object.
required kwargs:
.. code-block:: yaml
extension_name: myvmextension
virtual_machine_name: myvm
settings: {"commandToExecute": "hostname"}
optional kwargs:
.. code-block:: yaml
resource_group: < inferred from cloud configs >
location: < inferred from cloud configs >
publisher: < default: Microsoft.Azure.Extensions >
virtual_machine_extension_type: < default: CustomScript >
type_handler_version: < default: 2.0 >
auto_upgrade_minor_version: < default: True >
protected_settings: < default: None > | [
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L1897-L2000 | train |
saltstack/salt | salt/cloud/clouds/azurearm.py | stop | def stop(name, call=None):
'''
.. versionadded:: 2019.2.0
Stop (deallocate) a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a stop myminion
'''
if call == 'function':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
compconn = get_conn(client_type='compute')
resource_group = config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
ret = {}
if not resource_group:
groups = list_resource_groups()
for group in groups:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
break
except CloudError as exc:
if 'was not found' in exc.message:
continue
else:
ret = {'error': exc.message}
if not ret:
__utils__['azurearm.log_cloud_error']('compute', 'Unable to find virtual machine with name: {0}'.format(name))
ret = {'error': 'Unable to find virtual machine with name: {0}'.format(name)}
else:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=resource_group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to stop {0}: {1}'.format(name, exc.message))
ret = {'error': exc.message}
return ret | python | def stop(name, call=None):
'''
.. versionadded:: 2019.2.0
Stop (deallocate) a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a stop myminion
'''
if call == 'function':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
compconn = get_conn(client_type='compute')
resource_group = config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
ret = {}
if not resource_group:
groups = list_resource_groups()
for group in groups:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
break
except CloudError as exc:
if 'was not found' in exc.message:
continue
else:
ret = {'error': exc.message}
if not ret:
__utils__['azurearm.log_cloud_error']('compute', 'Unable to find virtual machine with name: {0}'.format(name))
ret = {'error': 'Unable to find virtual machine with name: {0}'.format(name)}
else:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=resource_group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to stop {0}: {1}'.format(name, exc.message))
ret = {'error': exc.message}
return ret | [
"def",
"stop",
"(",
"name",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The stop action must be called with -a or --action.'",
")",
"compconn",
"=",
"get_conn",
"(",
"client_type",
"=",
"'comput... | .. versionadded:: 2019.2.0
Stop (deallocate) a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a stop myminion | [
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/azurearm.py#L2003-L2061 | train |
saltstack/salt | salt/states/powerpath.py | license_present | def license_present(name):
'''
Ensures that the specified PowerPath license key is present
on the host.
name
The license key to ensure is present
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not __salt__['powerpath.has_powerpath']():
ret['result'] = False
ret['comment'] = 'PowerPath is not installed.'
return ret
licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]
if name in licenses:
ret['result'] = True
ret['comment'] = 'License key {0} already present'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'License key {0} is set to be added'.format(name)
return ret
data = __salt__['powerpath.add_license'](name)
if data['result']:
ret['changes'] = {name: 'added'}
ret['result'] = True
ret['comment'] = data['output']
return ret
else:
ret['result'] = False
ret['comment'] = data['output']
return ret | python | def license_present(name):
'''
Ensures that the specified PowerPath license key is present
on the host.
name
The license key to ensure is present
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not __salt__['powerpath.has_powerpath']():
ret['result'] = False
ret['comment'] = 'PowerPath is not installed.'
return ret
licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]
if name in licenses:
ret['result'] = True
ret['comment'] = 'License key {0} already present'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'License key {0} is set to be added'.format(name)
return ret
data = __salt__['powerpath.add_license'](name)
if data['result']:
ret['changes'] = {name: 'added'}
ret['result'] = True
ret['comment'] = data['output']
return ret
else:
ret['result'] = False
ret['comment'] = data['output']
return ret | [
"def",
"license_present",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"False",
",",
"'comment'",
":",
"''",
"}",
"if",
"not",
"__salt__",
"[",
"'powerpath.has_powerpath'",
"]",
"... | Ensures that the specified PowerPath license key is present
on the host.
name
The license key to ensure is present | [
"Ensures",
"that",
"the",
"specified",
"PowerPath",
"license",
"key",
"is",
"present",
"on",
"the",
"host",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/powerpath.py#L19-L58 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | _create_rpmmacros | def _create_rpmmacros(runas='root'):
'''
Create the .rpmmacros file in user's home directory
'''
home = os.path.expanduser('~')
rpmbuilddir = os.path.join(home, 'rpmbuild')
if not os.path.isdir(rpmbuilddir):
__salt__['file.makedirs_perms'](name=rpmbuilddir, user=runas, group='mock')
mockdir = os.path.join(home, 'mock')
if not os.path.isdir(mockdir):
__salt__['file.makedirs_perms'](name=mockdir, user=runas, group='mock')
rpmmacros = os.path.join(home, '.rpmmacros')
with salt.utils.files.fopen(rpmmacros, 'w') as afile:
afile.write(
salt.utils.stringutils.to_str('%_topdir {0}\n'.format(rpmbuilddir))
)
afile.write('%signature gpg\n')
afile.write('%_source_filedigest_algorithm 8\n')
afile.write('%_binary_filedigest_algorithm 8\n')
afile.write('%_gpg_name packaging@saltstack.com\n') | python | def _create_rpmmacros(runas='root'):
'''
Create the .rpmmacros file in user's home directory
'''
home = os.path.expanduser('~')
rpmbuilddir = os.path.join(home, 'rpmbuild')
if not os.path.isdir(rpmbuilddir):
__salt__['file.makedirs_perms'](name=rpmbuilddir, user=runas, group='mock')
mockdir = os.path.join(home, 'mock')
if not os.path.isdir(mockdir):
__salt__['file.makedirs_perms'](name=mockdir, user=runas, group='mock')
rpmmacros = os.path.join(home, '.rpmmacros')
with salt.utils.files.fopen(rpmmacros, 'w') as afile:
afile.write(
salt.utils.stringutils.to_str('%_topdir {0}\n'.format(rpmbuilddir))
)
afile.write('%signature gpg\n')
afile.write('%_source_filedigest_algorithm 8\n')
afile.write('%_binary_filedigest_algorithm 8\n')
afile.write('%_gpg_name packaging@saltstack.com\n') | [
"def",
"_create_rpmmacros",
"(",
"runas",
"=",
"'root'",
")",
":",
"home",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"rpmbuilddir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'rpmbuild'",
")",
"if",
"not",
"os",
".",
... | Create the .rpmmacros file in user's home directory | [
"Create",
"the",
".",
"rpmmacros",
"file",
"in",
"user",
"s",
"home",
"directory"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L72-L93 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | _mk_tree | def _mk_tree(runas='root'):
'''
Create the rpm build tree
'''
basedir = tempfile.mkdtemp()
paths = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
for path in paths:
full = os.path.join(basedir, path)
__salt__['file.makedirs_perms'](name=full, user=runas, group='mock')
return basedir | python | def _mk_tree(runas='root'):
'''
Create the rpm build tree
'''
basedir = tempfile.mkdtemp()
paths = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS']
for path in paths:
full = os.path.join(basedir, path)
__salt__['file.makedirs_perms'](name=full, user=runas, group='mock')
return basedir | [
"def",
"_mk_tree",
"(",
"runas",
"=",
"'root'",
")",
":",
"basedir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"paths",
"=",
"[",
"'BUILD'",
",",
"'RPMS'",
",",
"'SOURCES'",
",",
"'SPECS'",
",",
"'SRPMS'",
"]",
"for",
"path",
"in",
"paths",
":",
"fu... | Create the rpm build tree | [
"Create",
"the",
"rpm",
"build",
"tree"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L96-L105 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | _get_spec | def _get_spec(tree_base, spec, template, saltenv='base'):
'''
Get the spec file and place it in the SPECS dir
'''
spec_tgt = os.path.basename(spec)
dest = os.path.join(tree_base, 'SPECS', spec_tgt)
return __salt__['cp.get_url'](
spec,
dest,
saltenv=saltenv) | python | def _get_spec(tree_base, spec, template, saltenv='base'):
'''
Get the spec file and place it in the SPECS dir
'''
spec_tgt = os.path.basename(spec)
dest = os.path.join(tree_base, 'SPECS', spec_tgt)
return __salt__['cp.get_url'](
spec,
dest,
saltenv=saltenv) | [
"def",
"_get_spec",
"(",
"tree_base",
",",
"spec",
",",
"template",
",",
"saltenv",
"=",
"'base'",
")",
":",
"spec_tgt",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"spec",
")",
"dest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tree_base",
",",
... | Get the spec file and place it in the SPECS dir | [
"Get",
"the",
"spec",
"file",
"and",
"place",
"it",
"in",
"the",
"SPECS",
"dir"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L108-L117 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | _get_distset | def _get_distset(tgt):
'''
Get the distribution string for use with rpmbuild and mock
'''
# Centos adds 'centos' string to rpm names, removing that to have
# consistent naming on Centos and Redhat, and allow for Amazon naming
tgtattrs = tgt.split('-')
if tgtattrs[0] == 'amzn':
distset = '--define "dist .{0}1"'.format(tgtattrs[0])
elif tgtattrs[1] in ['6', '7']:
distset = '--define "dist .el{0}"'.format(tgtattrs[1])
else:
distset = ''
return distset | python | def _get_distset(tgt):
'''
Get the distribution string for use with rpmbuild and mock
'''
# Centos adds 'centos' string to rpm names, removing that to have
# consistent naming on Centos and Redhat, and allow for Amazon naming
tgtattrs = tgt.split('-')
if tgtattrs[0] == 'amzn':
distset = '--define "dist .{0}1"'.format(tgtattrs[0])
elif tgtattrs[1] in ['6', '7']:
distset = '--define "dist .el{0}"'.format(tgtattrs[1])
else:
distset = ''
return distset | [
"def",
"_get_distset",
"(",
"tgt",
")",
":",
"# Centos adds 'centos' string to rpm names, removing that to have",
"# consistent naming on Centos and Redhat, and allow for Amazon naming",
"tgtattrs",
"=",
"tgt",
".",
"split",
"(",
"'-'",
")",
"if",
"tgtattrs",
"[",
"0",
"]",
... | Get the distribution string for use with rpmbuild and mock | [
"Get",
"the",
"distribution",
"string",
"for",
"use",
"with",
"rpmbuild",
"and",
"mock"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L134-L148 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | _get_deps | def _get_deps(deps, tree_base, saltenv='base'):
'''
Get include string for list of dependent rpms to build package
'''
deps_list = ''
if deps is None:
return deps_list
if not isinstance(deps, list):
raise SaltInvocationError(
'\'deps\' must be a Python list or comma-separated string'
)
for deprpm in deps:
parsed = _urlparse(deprpm)
depbase = os.path.basename(deprpm)
dest = os.path.join(tree_base, depbase)
if parsed.scheme:
__salt__['cp.get_url'](deprpm, dest, saltenv=saltenv)
else:
shutil.copy(deprpm, dest)
deps_list += ' {0}'.format(dest)
return deps_list | python | def _get_deps(deps, tree_base, saltenv='base'):
'''
Get include string for list of dependent rpms to build package
'''
deps_list = ''
if deps is None:
return deps_list
if not isinstance(deps, list):
raise SaltInvocationError(
'\'deps\' must be a Python list or comma-separated string'
)
for deprpm in deps:
parsed = _urlparse(deprpm)
depbase = os.path.basename(deprpm)
dest = os.path.join(tree_base, depbase)
if parsed.scheme:
__salt__['cp.get_url'](deprpm, dest, saltenv=saltenv)
else:
shutil.copy(deprpm, dest)
deps_list += ' {0}'.format(dest)
return deps_list | [
"def",
"_get_deps",
"(",
"deps",
",",
"tree_base",
",",
"saltenv",
"=",
"'base'",
")",
":",
"deps_list",
"=",
"''",
"if",
"deps",
"is",
"None",
":",
"return",
"deps_list",
"if",
"not",
"isinstance",
"(",
"deps",
",",
"list",
")",
":",
"raise",
"SaltInv... | Get include string for list of dependent rpms to build package | [
"Get",
"include",
"string",
"for",
"list",
"of",
"dependent",
"rpms",
"to",
"build",
"package"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L151-L173 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | make_src_pkg | def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base', runas='root'):
'''
Create a source rpm from the given spec file and sources
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
.. versionchanged:: 2017.7.0
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
env
A dictionary of environment variables to be set prior to execution.
template
Run the spec file through a templating engine
Optional arguement, allows for no templating engine used to be
if none is desired.
saltenv
The saltenv to use for files downloaded from the salt filesever
runas
The user to run the build process as
.. versionadded:: 2018.3.3
.. note::
using SHA256 as digest and minimum level dist el6
'''
_create_rpmmacros(runas)
tree_base = _mk_tree(runas)
spec_path = _get_spec(tree_base, spec, template, saltenv)
__salt__['file.chown'](path=spec_path, user=runas, group='mock')
__salt__['file.chown'](path=tree_base, user=runas, group='mock')
if isinstance(sources, six.string_types):
sources = sources.split(',')
for src in sources:
_get_src(tree_base, src, saltenv, runas)
# make source rpms for dist el6 with SHA256, usable with mock on other dists
cmd = 'rpmbuild --verbose --define "_topdir {0}" -bs --define "dist .el6" {1}'.format(tree_base, spec_path)
retrc = __salt__['cmd.retcode'](cmd, runas=runas)
if retrc != 0:
raise SaltInvocationError(
'Make source package for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
srpms = os.path.join(tree_base, 'SRPMS')
ret = []
if not os.path.isdir(dest_dir):
__salt__['file.makedirs_perms'](name=dest_dir, user=runas, group='mock')
for fn_ in os.listdir(srpms):
full = os.path.join(srpms, fn_)
tgt = os.path.join(dest_dir, fn_)
shutil.copy(full, tgt)
ret.append(tgt)
return ret | python | def make_src_pkg(dest_dir, spec, sources, env=None, template=None, saltenv='base', runas='root'):
'''
Create a source rpm from the given spec file and sources
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
.. versionchanged:: 2017.7.0
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
env
A dictionary of environment variables to be set prior to execution.
template
Run the spec file through a templating engine
Optional arguement, allows for no templating engine used to be
if none is desired.
saltenv
The saltenv to use for files downloaded from the salt filesever
runas
The user to run the build process as
.. versionadded:: 2018.3.3
.. note::
using SHA256 as digest and minimum level dist el6
'''
_create_rpmmacros(runas)
tree_base = _mk_tree(runas)
spec_path = _get_spec(tree_base, spec, template, saltenv)
__salt__['file.chown'](path=spec_path, user=runas, group='mock')
__salt__['file.chown'](path=tree_base, user=runas, group='mock')
if isinstance(sources, six.string_types):
sources = sources.split(',')
for src in sources:
_get_src(tree_base, src, saltenv, runas)
# make source rpms for dist el6 with SHA256, usable with mock on other dists
cmd = 'rpmbuild --verbose --define "_topdir {0}" -bs --define "dist .el6" {1}'.format(tree_base, spec_path)
retrc = __salt__['cmd.retcode'](cmd, runas=runas)
if retrc != 0:
raise SaltInvocationError(
'Make source package for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
srpms = os.path.join(tree_base, 'SRPMS')
ret = []
if not os.path.isdir(dest_dir):
__salt__['file.makedirs_perms'](name=dest_dir, user=runas, group='mock')
for fn_ in os.listdir(srpms):
full = os.path.join(srpms, fn_)
tgt = os.path.join(dest_dir, fn_)
shutil.copy(full, tgt)
ret.append(tgt)
return ret | [
"def",
"make_src_pkg",
"(",
"dest_dir",
",",
"spec",
",",
"sources",
",",
"env",
"=",
"None",
",",
"template",
"=",
"None",
",",
"saltenv",
"=",
"'base'",
",",
"runas",
"=",
"'root'",
")",
":",
"_create_rpmmacros",
"(",
"runas",
")",
"tree_base",
"=",
... | Create a source rpm from the given spec file and sources
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_src_pkg /var/www/html/
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl SOURCE package and place it in
/var/www/html/ on the minion
.. versionchanged:: 2017.7.0
dest_dir
The directory on the minion to place the built package(s)
spec
The location of the spec file (used for rpms)
sources
The list of package sources
env
A dictionary of environment variables to be set prior to execution.
template
Run the spec file through a templating engine
Optional arguement, allows for no templating engine used to be
if none is desired.
saltenv
The saltenv to use for files downloaded from the salt filesever
runas
The user to run the build process as
.. versionadded:: 2018.3.3
.. note::
using SHA256 as digest and minimum level dist el6 | [
"Create",
"a",
"source",
"rpm",
"from",
"the",
"given",
"spec",
"file",
"and",
"sources"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L176-L257 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | build | def build(runas,
tgt,
dest_dir,
spec,
sources,
deps,
env,
template,
saltenv='base',
log_dir='/var/log/salt/pkgbuild'):
'''
Given the package destination directory, the spec file source and package
sources, use mock to safely build the rpm defined in the spec file
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for rhel 7 using user
mock and place it in /var/www/html/ on the minion
'''
ret = {}
try:
__salt__['file.chown'](path=dest_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
srpm_dir = os.path.join(dest_dir, 'SRPMS')
srpm_build_dir = tempfile.mkdtemp()
try:
srpms = make_src_pkg(srpm_build_dir, spec, sources,
env, template, saltenv, runas)
except Exception as exc:
shutil.rmtree(srpm_build_dir)
log.error('Failed to make src package')
return ret
distset = _get_distset(tgt)
noclean = ''
deps_dir = tempfile.mkdtemp()
deps_list = _get_deps(deps, deps_dir, saltenv)
retrc = 0
for srpm in srpms:
dbase = os.path.dirname(srpm)
results_dir = tempfile.mkdtemp()
try:
__salt__['file.chown'](path=dbase, user=runas, group='mock')
__salt__['file.chown'](path=results_dir, user=runas, group='mock')
cmd = 'mock --root={0} --resultdir={1} --init'.format(tgt, results_dir)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
if deps_list and not deps_list.isspace():
cmd = 'mock --root={0} --resultdir={1} --install {2} {3}'.format(tgt, results_dir, deps_list, noclean)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
noclean += ' --no-clean'
cmd = 'mock --root={0} --resultdir={1} {2} {3} {4}'.format(
tgt,
results_dir,
distset,
noclean,
srpm)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
cmdlist = [
'rpm',
'-qp',
'--queryformat',
'{0}/%{{name}}/%{{version}}-%{{release}}'.format(log_dir),
srpm]
log_dest = __salt__['cmd.run_stdout'](cmdlist, python_shell=False)
for filename in os.listdir(results_dir):
full = os.path.join(results_dir, filename)
if filename.endswith('src.rpm'):
sdest = os.path.join(srpm_dir, filename)
try:
__salt__['file.makedirs_perms'](name=srpm_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(full, sdest)
ret.setdefault('Source Packages', []).append(sdest)
elif filename.endswith('.rpm'):
bdist = os.path.join(dest_dir, filename)
shutil.copy(full, bdist)
ret.setdefault('Packages', []).append(bdist)
else:
log_file = os.path.join(log_dest, filename)
try:
__salt__['file.makedirs_perms'](name=log_dest, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(full, log_file)
ret.setdefault('Log Files', []).append(log_file)
except Exception as exc:
log.error('Error building from %s: %s', srpm, exc)
finally:
shutil.rmtree(results_dir)
if retrc != 0:
raise SaltInvocationError(
'Building packages for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
shutil.rmtree(deps_dir)
shutil.rmtree(srpm_build_dir)
return ret | python | def build(runas,
tgt,
dest_dir,
spec,
sources,
deps,
env,
template,
saltenv='base',
log_dir='/var/log/salt/pkgbuild'):
'''
Given the package destination directory, the spec file source and package
sources, use mock to safely build the rpm defined in the spec file
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for rhel 7 using user
mock and place it in /var/www/html/ on the minion
'''
ret = {}
try:
__salt__['file.chown'](path=dest_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
srpm_dir = os.path.join(dest_dir, 'SRPMS')
srpm_build_dir = tempfile.mkdtemp()
try:
srpms = make_src_pkg(srpm_build_dir, spec, sources,
env, template, saltenv, runas)
except Exception as exc:
shutil.rmtree(srpm_build_dir)
log.error('Failed to make src package')
return ret
distset = _get_distset(tgt)
noclean = ''
deps_dir = tempfile.mkdtemp()
deps_list = _get_deps(deps, deps_dir, saltenv)
retrc = 0
for srpm in srpms:
dbase = os.path.dirname(srpm)
results_dir = tempfile.mkdtemp()
try:
__salt__['file.chown'](path=dbase, user=runas, group='mock')
__salt__['file.chown'](path=results_dir, user=runas, group='mock')
cmd = 'mock --root={0} --resultdir={1} --init'.format(tgt, results_dir)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
if deps_list and not deps_list.isspace():
cmd = 'mock --root={0} --resultdir={1} --install {2} {3}'.format(tgt, results_dir, deps_list, noclean)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
noclean += ' --no-clean'
cmd = 'mock --root={0} --resultdir={1} {2} {3} {4}'.format(
tgt,
results_dir,
distset,
noclean,
srpm)
retrc |= __salt__['cmd.retcode'](cmd, runas=runas)
cmdlist = [
'rpm',
'-qp',
'--queryformat',
'{0}/%{{name}}/%{{version}}-%{{release}}'.format(log_dir),
srpm]
log_dest = __salt__['cmd.run_stdout'](cmdlist, python_shell=False)
for filename in os.listdir(results_dir):
full = os.path.join(results_dir, filename)
if filename.endswith('src.rpm'):
sdest = os.path.join(srpm_dir, filename)
try:
__salt__['file.makedirs_perms'](name=srpm_dir, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(full, sdest)
ret.setdefault('Source Packages', []).append(sdest)
elif filename.endswith('.rpm'):
bdist = os.path.join(dest_dir, filename)
shutil.copy(full, bdist)
ret.setdefault('Packages', []).append(bdist)
else:
log_file = os.path.join(log_dest, filename)
try:
__salt__['file.makedirs_perms'](name=log_dest, user=runas, group='mock')
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(full, log_file)
ret.setdefault('Log Files', []).append(log_file)
except Exception as exc:
log.error('Error building from %s: %s', srpm, exc)
finally:
shutil.rmtree(results_dir)
if retrc != 0:
raise SaltInvocationError(
'Building packages for destination directory {0}, spec {1}, sources {2}, failed '
'with return error {3}, check logs for further details'.format(
dest_dir,
spec,
sources,
retrc)
)
shutil.rmtree(deps_dir)
shutil.rmtree(srpm_build_dir)
return ret | [
"def",
"build",
"(",
"runas",
",",
"tgt",
",",
"dest_dir",
",",
"spec",
",",
"sources",
",",
"deps",
",",
"env",
",",
"template",
",",
"saltenv",
"=",
"'base'",
",",
"log_dir",
"=",
"'/var/log/salt/pkgbuild'",
")",
":",
"ret",
"=",
"{",
"}",
"try",
"... | Given the package destination directory, the spec file source and package
sources, use mock to safely build the rpm defined in the spec file
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.build mock epel-7-x86_64 /var/www/html
https://raw.githubusercontent.com/saltstack/libnacl/master/pkg/rpm/python-libnacl.spec
https://pypi.python.org/packages/source/l/libnacl/libnacl-1.3.5.tar.gz
This example command should build the libnacl package for rhel 7 using user
mock and place it in /var/www/html/ on the minion | [
"Given",
"the",
"package",
"destination",
"directory",
"the",
"spec",
"file",
"source",
"and",
"package",
"sources",
"use",
"mock",
"to",
"safely",
"build",
"the",
"rpm",
"defined",
"in",
"the",
"spec",
"file"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L260-L374 | train |
saltstack/salt | salt/modules/rpmbuild_pkgbuild.py | make_repo | def make_repo(repodir,
keyid=None,
env=None,
use_passphrase=False,
gnupghome='/etc/salt/gpgkeys',
runas='root',
timeout=15.0):
'''
Make a package repository and optionally sign packages present
Given the repodir, create a ``yum`` repository out of the rpms therein
and optionally sign it and packages present, the name is directory to
turn into a repo. This state is best used with onchanges linked to
your package building states.
repodir
The directory to find packages that will be in the repository.
keyid
.. versionchanged:: 2016.3.0
Optional Key ID to use in signing packages and repository.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data.
For example, contents from a Pillar data file with named Public
and Private keys as follows:
.. code-block:: yaml
gpg_pkg_priv_key: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1
lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
=JvW8
-----END PGP PRIVATE KEY BLOCK-----
gpg_pkg_priv_keyname: gpg_pkg_key.pem
gpg_pkg_pub_key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
inNqW9c=
=s1CX
-----END PGP PUBLIC KEY BLOCK-----
gpg_pkg_pub_keyname: gpg_pkg_key.pub
env
.. versionchanged:: 2016.3.0
A dictionary of environment variables to be utilized in creating the
repository.
.. note::
This parameter is not used for making ``yum`` repositories.
use_passphrase : False
.. versionadded:: 2016.3.0
Use a passphrase with the signing key presented in ``keyid``.
Passphrase is received from Pillar data which could be passed on the
command line with ``pillar`` parameter. For example:
.. code-block:: bash
pillar='{ "gpg_passphrase" : "my_passphrase" }'
gnupghome : /etc/salt/gpgkeys
.. versionadded:: 2016.3.0
Location where GPG related files are stored, used with ``keyid``.
runas : root
.. versionadded:: 2016.3.0
User to create the repository as, and optionally sign packages.
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
timeout : 15.0
.. versionadded:: 2016.3.4
Timeout in seconds to wait for the prompt for inputting the passphrase.
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_repo /var/www/html/
'''
SIGN_PROMPT_RE = re.compile(r'Enter pass phrase: ', re.M)
define_gpg_name = ''
local_keyid = None
local_uids = None
phrase = ''
if keyid is not None:
# import_keys
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
if pkg_pub_key_file is None or pkg_priv_key_file is None:
raise SaltInvocationError(
'Pillar data should contain Public and Private keys associated with \'keyid\''
)
try:
__salt__['gpg.import_key'](user=runas, filename=pkg_pub_key_file, gnupghome=gnupghome)
__salt__['gpg.import_key'](user=runas, filename=pkg_priv_key_file, gnupghome=gnupghome)
except SaltInvocationError:
raise SaltInvocationError(
'Public and Private key files associated with Pillar data and \'keyid\' '
'{0} could not be found'
.format(keyid)
)
# gpg keys should have been loaded as part of setup
# retrieve specified key and preset passphrase
local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
for gpg_key in local_keys:
if keyid == gpg_key['keyid'][8:]:
local_uids = gpg_key['uids']
local_keyid = gpg_key['keyid']
break
if local_keyid is None:
raise SaltInvocationError(
'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''
.format(keyid, gnupghome)
)
if use_passphrase:
phrase = __salt__['pillar.get']('gpg_passphrase')
if local_uids:
define_gpg_name = '--define=\'%_signature gpg\' --define=\'%_gpg_name {0}\''.format(
local_uids[0]
)
# need to update rpm with public key
cmd = 'rpm --import {0}'.format(pkg_pub_key_file)
retrc = __salt__['cmd.retcode'](cmd, runas=runas, use_vt=True)
if retrc != 0:
raise SaltInvocationError(
'Failed to import public key from file {0} with return '
'error {1}, check logs for further details'.format(
pkg_pub_key_file,
retrc)
)
# sign_it_here
# interval of 0.125 is really too fast on some systems
interval = 0.5
for fileused in os.listdir(repodir):
if fileused.endswith('.rpm'):
abs_file = os.path.join(repodir, fileused)
number_retries = timeout / interval
times_looped = 0
error_msg = 'Failed to sign file {0}'.format(abs_file)
cmd = 'rpm {0} --addsign {1}'.format(define_gpg_name, abs_file)
preexec_fn = functools.partial(salt.utils.user.chugid_and_umask, runas, None)
try:
stdout, stderr = None, None
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
preexec_fn=preexec_fn,
stream_stdout=True,
stream_stderr=True
)
while proc.has_unread_data:
stdout, stderr = proc.recv()
if stdout and SIGN_PROMPT_RE.search(stdout):
# have the prompt for inputting the passphrase
proc.sendline(phrase)
else:
times_looped += 1
if times_looped > number_retries:
raise SaltInvocationError(
'Attemping to sign file {0} failed, timed out after {1} seconds'
.format(abs_file, int(times_looped * interval))
)
time.sleep(interval)
proc_exitstatus = proc.exitstatus
if proc_exitstatus != 0:
raise SaltInvocationError(
'Signing file {0} failed with proc.status {1}'
.format(abs_file, proc_exitstatus)
)
except salt.utils.vt.TerminalException as err:
trace = traceback.format_exc()
log.error(error_msg, err, trace)
finally:
proc.close(terminate=True, kill=True)
cmd = 'createrepo --update {0}'.format(repodir)
return __salt__['cmd.run_all'](cmd, runas=runas) | python | def make_repo(repodir,
keyid=None,
env=None,
use_passphrase=False,
gnupghome='/etc/salt/gpgkeys',
runas='root',
timeout=15.0):
'''
Make a package repository and optionally sign packages present
Given the repodir, create a ``yum`` repository out of the rpms therein
and optionally sign it and packages present, the name is directory to
turn into a repo. This state is best used with onchanges linked to
your package building states.
repodir
The directory to find packages that will be in the repository.
keyid
.. versionchanged:: 2016.3.0
Optional Key ID to use in signing packages and repository.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data.
For example, contents from a Pillar data file with named Public
and Private keys as follows:
.. code-block:: yaml
gpg_pkg_priv_key: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1
lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
=JvW8
-----END PGP PRIVATE KEY BLOCK-----
gpg_pkg_priv_keyname: gpg_pkg_key.pem
gpg_pkg_pub_key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
inNqW9c=
=s1CX
-----END PGP PUBLIC KEY BLOCK-----
gpg_pkg_pub_keyname: gpg_pkg_key.pub
env
.. versionchanged:: 2016.3.0
A dictionary of environment variables to be utilized in creating the
repository.
.. note::
This parameter is not used for making ``yum`` repositories.
use_passphrase : False
.. versionadded:: 2016.3.0
Use a passphrase with the signing key presented in ``keyid``.
Passphrase is received from Pillar data which could be passed on the
command line with ``pillar`` parameter. For example:
.. code-block:: bash
pillar='{ "gpg_passphrase" : "my_passphrase" }'
gnupghome : /etc/salt/gpgkeys
.. versionadded:: 2016.3.0
Location where GPG related files are stored, used with ``keyid``.
runas : root
.. versionadded:: 2016.3.0
User to create the repository as, and optionally sign packages.
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
timeout : 15.0
.. versionadded:: 2016.3.4
Timeout in seconds to wait for the prompt for inputting the passphrase.
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_repo /var/www/html/
'''
SIGN_PROMPT_RE = re.compile(r'Enter pass phrase: ', re.M)
define_gpg_name = ''
local_keyid = None
local_uids = None
phrase = ''
if keyid is not None:
# import_keys
pkg_pub_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_pub_keyname', None))
pkg_priv_key_file = '{0}/{1}'.format(gnupghome, __salt__['pillar.get']('gpg_pkg_priv_keyname', None))
if pkg_pub_key_file is None or pkg_priv_key_file is None:
raise SaltInvocationError(
'Pillar data should contain Public and Private keys associated with \'keyid\''
)
try:
__salt__['gpg.import_key'](user=runas, filename=pkg_pub_key_file, gnupghome=gnupghome)
__salt__['gpg.import_key'](user=runas, filename=pkg_priv_key_file, gnupghome=gnupghome)
except SaltInvocationError:
raise SaltInvocationError(
'Public and Private key files associated with Pillar data and \'keyid\' '
'{0} could not be found'
.format(keyid)
)
# gpg keys should have been loaded as part of setup
# retrieve specified key and preset passphrase
local_keys = __salt__['gpg.list_keys'](user=runas, gnupghome=gnupghome)
for gpg_key in local_keys:
if keyid == gpg_key['keyid'][8:]:
local_uids = gpg_key['uids']
local_keyid = gpg_key['keyid']
break
if local_keyid is None:
raise SaltInvocationError(
'The key ID \'{0}\' was not found in GnuPG keyring at \'{1}\''
.format(keyid, gnupghome)
)
if use_passphrase:
phrase = __salt__['pillar.get']('gpg_passphrase')
if local_uids:
define_gpg_name = '--define=\'%_signature gpg\' --define=\'%_gpg_name {0}\''.format(
local_uids[0]
)
# need to update rpm with public key
cmd = 'rpm --import {0}'.format(pkg_pub_key_file)
retrc = __salt__['cmd.retcode'](cmd, runas=runas, use_vt=True)
if retrc != 0:
raise SaltInvocationError(
'Failed to import public key from file {0} with return '
'error {1}, check logs for further details'.format(
pkg_pub_key_file,
retrc)
)
# sign_it_here
# interval of 0.125 is really too fast on some systems
interval = 0.5
for fileused in os.listdir(repodir):
if fileused.endswith('.rpm'):
abs_file = os.path.join(repodir, fileused)
number_retries = timeout / interval
times_looped = 0
error_msg = 'Failed to sign file {0}'.format(abs_file)
cmd = 'rpm {0} --addsign {1}'.format(define_gpg_name, abs_file)
preexec_fn = functools.partial(salt.utils.user.chugid_and_umask, runas, None)
try:
stdout, stderr = None, None
proc = salt.utils.vt.Terminal(
cmd,
shell=True,
preexec_fn=preexec_fn,
stream_stdout=True,
stream_stderr=True
)
while proc.has_unread_data:
stdout, stderr = proc.recv()
if stdout and SIGN_PROMPT_RE.search(stdout):
# have the prompt for inputting the passphrase
proc.sendline(phrase)
else:
times_looped += 1
if times_looped > number_retries:
raise SaltInvocationError(
'Attemping to sign file {0} failed, timed out after {1} seconds'
.format(abs_file, int(times_looped * interval))
)
time.sleep(interval)
proc_exitstatus = proc.exitstatus
if proc_exitstatus != 0:
raise SaltInvocationError(
'Signing file {0} failed with proc.status {1}'
.format(abs_file, proc_exitstatus)
)
except salt.utils.vt.TerminalException as err:
trace = traceback.format_exc()
log.error(error_msg, err, trace)
finally:
proc.close(terminate=True, kill=True)
cmd = 'createrepo --update {0}'.format(repodir)
return __salt__['cmd.run_all'](cmd, runas=runas) | [
"def",
"make_repo",
"(",
"repodir",
",",
"keyid",
"=",
"None",
",",
"env",
"=",
"None",
",",
"use_passphrase",
"=",
"False",
",",
"gnupghome",
"=",
"'/etc/salt/gpgkeys'",
",",
"runas",
"=",
"'root'",
",",
"timeout",
"=",
"15.0",
")",
":",
"SIGN_PROMPT_RE",... | Make a package repository and optionally sign packages present
Given the repodir, create a ``yum`` repository out of the rpms therein
and optionally sign it and packages present, the name is directory to
turn into a repo. This state is best used with onchanges linked to
your package building states.
repodir
The directory to find packages that will be in the repository.
keyid
.. versionchanged:: 2016.3.0
Optional Key ID to use in signing packages and repository.
Utilizes Public and Private keys associated with keyid which have
been loaded into the minion's Pillar data.
For example, contents from a Pillar data file with named Public
and Private keys as follows:
.. code-block:: yaml
gpg_pkg_priv_key: |
-----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v1
lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn
R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX
=JvW8
-----END PGP PRIVATE KEY BLOCK-----
gpg_pkg_priv_keyname: gpg_pkg_key.pem
gpg_pkg_pub_key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b
w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc
.
.
bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP
4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki
inNqW9c=
=s1CX
-----END PGP PUBLIC KEY BLOCK-----
gpg_pkg_pub_keyname: gpg_pkg_key.pub
env
.. versionchanged:: 2016.3.0
A dictionary of environment variables to be utilized in creating the
repository.
.. note::
This parameter is not used for making ``yum`` repositories.
use_passphrase : False
.. versionadded:: 2016.3.0
Use a passphrase with the signing key presented in ``keyid``.
Passphrase is received from Pillar data which could be passed on the
command line with ``pillar`` parameter. For example:
.. code-block:: bash
pillar='{ "gpg_passphrase" : "my_passphrase" }'
gnupghome : /etc/salt/gpgkeys
.. versionadded:: 2016.3.0
Location where GPG related files are stored, used with ``keyid``.
runas : root
.. versionadded:: 2016.3.0
User to create the repository as, and optionally sign packages.
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
timeout : 15.0
.. versionadded:: 2016.3.4
Timeout in seconds to wait for the prompt for inputting the passphrase.
CLI Example:
.. code-block:: bash
salt '*' pkgbuild.make_repo /var/www/html/ | [
"Make",
"a",
"package",
"repository",
"and",
"optionally",
"sign",
"packages",
"present"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rpmbuild_pkgbuild.py#L377-L595 | train |
saltstack/salt | salt/output/no_return.py | NestDisplay.display | def display(self, ret, indent, prefix, out):
'''
Recursively iterate down through data structures to determine output
'''
if isinstance(ret, six.string_types):
lines = ret.split('\n')
for line in lines:
out += '{0}{1}{2}{3}{4}\n'.format(
self.colors['RED'],
' ' * indent,
prefix,
line,
self.colors['ENDC'])
elif isinstance(ret, dict):
for key in sorted(ret):
val = ret[key]
out += '{0}{1}{2}{3}{4}:\n'.format(
self.colors['CYAN'],
' ' * indent,
prefix,
key,
self.colors['ENDC'])
out = self.display(val, indent + 4, '', out)
return out | python | def display(self, ret, indent, prefix, out):
'''
Recursively iterate down through data structures to determine output
'''
if isinstance(ret, six.string_types):
lines = ret.split('\n')
for line in lines:
out += '{0}{1}{2}{3}{4}\n'.format(
self.colors['RED'],
' ' * indent,
prefix,
line,
self.colors['ENDC'])
elif isinstance(ret, dict):
for key in sorted(ret):
val = ret[key]
out += '{0}{1}{2}{3}{4}:\n'.format(
self.colors['CYAN'],
' ' * indent,
prefix,
key,
self.colors['ENDC'])
out = self.display(val, indent + 4, '', out)
return out | [
"def",
"display",
"(",
"self",
",",
"ret",
",",
"indent",
",",
"prefix",
",",
"out",
")",
":",
"if",
"isinstance",
"(",
"ret",
",",
"six",
".",
"string_types",
")",
":",
"lines",
"=",
"ret",
".",
"split",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"... | Recursively iterate down through data structures to determine output | [
"Recursively",
"iterate",
"down",
"through",
"data",
"structures",
"to",
"determine",
"output"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/no_return.py#L33-L56 | train |
saltstack/salt | salt/engines/thorium.py | start | def start(grains=False, grain_keys=None, pillar=False, pillar_keys=None):
'''
Execute the Thorium runtime
'''
state = salt.thorium.ThorState(
__opts__,
grains,
grain_keys,
pillar,
pillar_keys)
state.start_runtime() | python | def start(grains=False, grain_keys=None, pillar=False, pillar_keys=None):
'''
Execute the Thorium runtime
'''
state = salt.thorium.ThorState(
__opts__,
grains,
grain_keys,
pillar,
pillar_keys)
state.start_runtime() | [
"def",
"start",
"(",
"grains",
"=",
"False",
",",
"grain_keys",
"=",
"None",
",",
"pillar",
"=",
"False",
",",
"pillar_keys",
"=",
"None",
")",
":",
"state",
"=",
"salt",
".",
"thorium",
".",
"ThorState",
"(",
"__opts__",
",",
"grains",
",",
"grain_key... | Execute the Thorium runtime | [
"Execute",
"the",
"Thorium",
"runtime"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/thorium.py#L11-L21 | train |
saltstack/salt | salt/proxy/junos.py | init | def init(opts):
'''
Open the connection to the Junos device, login, and bind to the
Resource class
'''
opts['multiprocessing'] = False
log.debug('Opening connection to junos')
args = {"host": opts['proxy']['host']}
optional_args = ['user',
'username',
'password',
'passwd',
'port',
'gather_facts',
'mode',
'baud',
'attempts',
'auto_probe',
'ssh_private_key_file',
'ssh_config',
'normalize'
]
if 'username' in opts['proxy'].keys():
opts['proxy']['user'] = opts['proxy'].pop('username')
proxy_keys = opts['proxy'].keys()
for arg in optional_args:
if arg in proxy_keys:
args[arg] = opts['proxy'][arg]
thisproxy['conn'] = jnpr.junos.Device(**args)
try:
thisproxy['conn'].open()
except (ProbeError, ConnectAuthError, ConnectRefusedError, ConnectTimeoutError,
ConnectError) as ex:
log.error("{} : not able to initiate connection to the device".format(str(ex)))
thisproxy['initialized'] = False
return
if 'timeout' in proxy_keys:
timeout = int(opts['proxy']['timeout'])
try:
thisproxy['conn'].timeout = timeout
except Exception as ex:
log.error('Not able to set timeout due to: %s', str(ex))
else:
log.debug('RPC timeout set to %d seconds', timeout)
try:
thisproxy['conn'].bind(cu=jnpr.junos.utils.config.Config)
except Exception as ex:
log.error('Bind failed with Config class due to: {}'.format(str(ex)))
try:
thisproxy['conn'].bind(sw=jnpr.junos.utils.sw.SW)
except Exception as ex:
log.error('Bind failed with SW class due to: {}'.format(str(ex)))
thisproxy['initialized'] = True | python | def init(opts):
'''
Open the connection to the Junos device, login, and bind to the
Resource class
'''
opts['multiprocessing'] = False
log.debug('Opening connection to junos')
args = {"host": opts['proxy']['host']}
optional_args = ['user',
'username',
'password',
'passwd',
'port',
'gather_facts',
'mode',
'baud',
'attempts',
'auto_probe',
'ssh_private_key_file',
'ssh_config',
'normalize'
]
if 'username' in opts['proxy'].keys():
opts['proxy']['user'] = opts['proxy'].pop('username')
proxy_keys = opts['proxy'].keys()
for arg in optional_args:
if arg in proxy_keys:
args[arg] = opts['proxy'][arg]
thisproxy['conn'] = jnpr.junos.Device(**args)
try:
thisproxy['conn'].open()
except (ProbeError, ConnectAuthError, ConnectRefusedError, ConnectTimeoutError,
ConnectError) as ex:
log.error("{} : not able to initiate connection to the device".format(str(ex)))
thisproxy['initialized'] = False
return
if 'timeout' in proxy_keys:
timeout = int(opts['proxy']['timeout'])
try:
thisproxy['conn'].timeout = timeout
except Exception as ex:
log.error('Not able to set timeout due to: %s', str(ex))
else:
log.debug('RPC timeout set to %d seconds', timeout)
try:
thisproxy['conn'].bind(cu=jnpr.junos.utils.config.Config)
except Exception as ex:
log.error('Bind failed with Config class due to: {}'.format(str(ex)))
try:
thisproxy['conn'].bind(sw=jnpr.junos.utils.sw.SW)
except Exception as ex:
log.error('Bind failed with SW class due to: {}'.format(str(ex)))
thisproxy['initialized'] = True | [
"def",
"init",
"(",
"opts",
")",
":",
"opts",
"[",
"'multiprocessing'",
"]",
"=",
"False",
"log",
".",
"debug",
"(",
"'Opening connection to junos'",
")",
"args",
"=",
"{",
"\"host\"",
":",
"opts",
"[",
"'proxy'",
"]",
"[",
"'host'",
"]",
"}",
"optional_... | Open the connection to the Junos device, login, and bind to the
Resource class | [
"Open",
"the",
"connection",
"to",
"the",
"Junos",
"device",
"login",
"and",
"bind",
"to",
"the",
"Resource",
"class"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/junos.py#L76-L134 | train |
saltstack/salt | salt/proxy/junos.py | alive | def alive(opts):
'''
Validate and return the connection status with the remote device.
.. versionadded:: 2018.3.0
'''
dev = conn()
thisproxy['conn'].connected = ping()
if not dev.connected:
__salt__['event.fire_master']({}, 'junos/proxy/{}/stop'.format(
opts['proxy']['host']))
return dev.connected | python | def alive(opts):
'''
Validate and return the connection status with the remote device.
.. versionadded:: 2018.3.0
'''
dev = conn()
thisproxy['conn'].connected = ping()
if not dev.connected:
__salt__['event.fire_master']({}, 'junos/proxy/{}/stop'.format(
opts['proxy']['host']))
return dev.connected | [
"def",
"alive",
"(",
"opts",
")",
":",
"dev",
"=",
"conn",
"(",
")",
"thisproxy",
"[",
"'conn'",
"]",
".",
"connected",
"=",
"ping",
"(",
")",
"if",
"not",
"dev",
".",
"connected",
":",
"__salt__",
"[",
"'event.fire_master'",
"]",
"(",
"{",
"}",
",... | Validate and return the connection status with the remote device.
.. versionadded:: 2018.3.0 | [
"Validate",
"and",
"return",
"the",
"connection",
"status",
"with",
"the",
"remote",
"device",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/junos.py#L145-L159 | train |
saltstack/salt | salt/proxy/junos.py | ping | def ping():
'''
Ping? Pong!
'''
dev = conn()
# Check that the underlying netconf connection still exists.
if dev._conn is None:
return False
# call rpc only if ncclient queue is empty. If not empty that means other
# rpc call is going on.
if hasattr(dev._conn, '_session'):
if dev._conn._session._transport.is_active():
# there is no on going rpc call. buffer tell can be 1 as it stores
# remaining char after "]]>]]>" which can be a new line char
if dev._conn._session._buffer.tell() <= 1 and \
dev._conn._session._q.empty():
return _rpc_file_list(dev)
else:
log.debug('skipped ping() call as proxy already getting data')
return True
else:
# ssh connection is lost
return False
else:
# other connection modes, like telnet
return _rpc_file_list(dev) | python | def ping():
'''
Ping? Pong!
'''
dev = conn()
# Check that the underlying netconf connection still exists.
if dev._conn is None:
return False
# call rpc only if ncclient queue is empty. If not empty that means other
# rpc call is going on.
if hasattr(dev._conn, '_session'):
if dev._conn._session._transport.is_active():
# there is no on going rpc call. buffer tell can be 1 as it stores
# remaining char after "]]>]]>" which can be a new line char
if dev._conn._session._buffer.tell() <= 1 and \
dev._conn._session._q.empty():
return _rpc_file_list(dev)
else:
log.debug('skipped ping() call as proxy already getting data')
return True
else:
# ssh connection is lost
return False
else:
# other connection modes, like telnet
return _rpc_file_list(dev) | [
"def",
"ping",
"(",
")",
":",
"dev",
"=",
"conn",
"(",
")",
"# Check that the underlying netconf connection still exists.",
"if",
"dev",
".",
"_conn",
"is",
"None",
":",
"return",
"False",
"# call rpc only if ncclient queue is empty. If not empty that means other",
"# rpc c... | Ping? Pong! | [
"Ping?",
"Pong!"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/junos.py#L162-L189 | train |
saltstack/salt | salt/utils/iam.py | _retry_get_url | def _retry_get_url(url, num_retries=10, timeout=5):
'''
Retry grabbing a URL.
Based heavily on boto.utils.retry_url
'''
for i in range(0, num_retries):
try:
result = requests.get(url, timeout=timeout, proxies={'http': ''})
if hasattr(result, 'text'):
return result.text
elif hasattr(result, 'content'):
return result.content
else:
return ''
except requests.exceptions.HTTPError as exc:
return ''
except Exception as exc:
pass
log.warning(
'Caught exception reading from URL. Retry no. %s', i
)
log.warning(pprint.pformat(exc))
time.sleep(2 ** i)
log.error(
'Failed to read from URL for %s times. Giving up.', num_retries
)
return '' | python | def _retry_get_url(url, num_retries=10, timeout=5):
'''
Retry grabbing a URL.
Based heavily on boto.utils.retry_url
'''
for i in range(0, num_retries):
try:
result = requests.get(url, timeout=timeout, proxies={'http': ''})
if hasattr(result, 'text'):
return result.text
elif hasattr(result, 'content'):
return result.content
else:
return ''
except requests.exceptions.HTTPError as exc:
return ''
except Exception as exc:
pass
log.warning(
'Caught exception reading from URL. Retry no. %s', i
)
log.warning(pprint.pformat(exc))
time.sleep(2 ** i)
log.error(
'Failed to read from URL for %s times. Giving up.', num_retries
)
return '' | [
"def",
"_retry_get_url",
"(",
"url",
",",
"num_retries",
"=",
"10",
",",
"timeout",
"=",
"5",
")",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"num_retries",
")",
":",
"try",
":",
"result",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"timeout"... | Retry grabbing a URL.
Based heavily on boto.utils.retry_url | [
"Retry",
"grabbing",
"a",
"URL",
".",
"Based",
"heavily",
"on",
"boto",
".",
"utils",
".",
"retry_url"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/iam.py#L26-L53 | train |
saltstack/salt | salt/utils/iam.py | _convert_key_to_str | def _convert_key_to_str(key):
'''
Stolen completely from boto.providers
'''
# IMPORTANT: on PY2, the secret key must be str and not unicode to work
# properly with hmac.new (see http://bugs.python.org/issue5285)
#
# pylint: disable=incompatible-py3-code,undefined-variable
return salt.utils.data.encode(key) \
if six.PY2 and isinstance(key, unicode) \
else key | python | def _convert_key_to_str(key):
'''
Stolen completely from boto.providers
'''
# IMPORTANT: on PY2, the secret key must be str and not unicode to work
# properly with hmac.new (see http://bugs.python.org/issue5285)
#
# pylint: disable=incompatible-py3-code,undefined-variable
return salt.utils.data.encode(key) \
if six.PY2 and isinstance(key, unicode) \
else key | [
"def",
"_convert_key_to_str",
"(",
"key",
")",
":",
"# IMPORTANT: on PY2, the secret key must be str and not unicode to work",
"# properly with hmac.new (see http://bugs.python.org/issue5285)",
"#",
"# pylint: disable=incompatible-py3-code,undefined-variable",
"return",
"salt",
".",
"utils... | Stolen completely from boto.providers | [
"Stolen",
"completely",
"from",
"boto",
".",
"providers"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/iam.py#L56-L66 | train |
saltstack/salt | salt/cli/caller.py | BaseCaller.print_docs | def print_docs(self):
'''
Pick up the documentation for all of the modules and print it out.
'''
docs = {}
for name, func in six.iteritems(self.minion.functions):
if name not in docs:
if func.__doc__:
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get('fun', '')):
salt.utils.stringutils.print_cli('{0}:\n{1}\n'.format(name, docs[name])) | python | def print_docs(self):
'''
Pick up the documentation for all of the modules and print it out.
'''
docs = {}
for name, func in six.iteritems(self.minion.functions):
if name not in docs:
if func.__doc__:
docs[name] = func.__doc__
for name in sorted(docs):
if name.startswith(self.opts.get('fun', '')):
salt.utils.stringutils.print_cli('{0}:\n{1}\n'.format(name, docs[name])) | [
"def",
"print_docs",
"(",
"self",
")",
":",
"docs",
"=",
"{",
"}",
"for",
"name",
",",
"func",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"minion",
".",
"functions",
")",
":",
"if",
"name",
"not",
"in",
"docs",
":",
"if",
"func",
".",
"__do... | Pick up the documentation for all of the modules and print it out. | [
"Pick",
"up",
"the",
"documentation",
"for",
"all",
"of",
"the",
"modules",
"and",
"print",
"it",
"out",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/caller.py#L91-L102 | train |
saltstack/salt | salt/cli/caller.py | BaseCaller.print_grains | def print_grains(self):
'''
Print out the grains
'''
grains = self.minion.opts.get('grains') or salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts) | python | def print_grains(self):
'''
Print out the grains
'''
grains = self.minion.opts.get('grains') or salt.loader.grains(self.opts)
salt.output.display_output({'local': grains}, 'grains', self.opts) | [
"def",
"print_grains",
"(",
"self",
")",
":",
"grains",
"=",
"self",
".",
"minion",
".",
"opts",
".",
"get",
"(",
"'grains'",
")",
"or",
"salt",
".",
"loader",
".",
"grains",
"(",
"self",
".",
"opts",
")",
"salt",
".",
"output",
".",
"display_output"... | Print out the grains | [
"Print",
"out",
"the",
"grains"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/caller.py#L104-L109 | train |
saltstack/salt | salt/cli/caller.py | BaseCaller.run | def run(self):
'''
Execute the salt call logic
'''
profiling_enabled = self.opts.get('profiling_enabled', False)
try:
pr = salt.utils.profile.activate_profile(profiling_enabled)
try:
ret = self.call()
finally:
salt.utils.profile.output_profile(
pr,
stats_path=self.opts.get('profiling_path', '/tmp/stats'),
stop=True)
out = ret.get('out', 'nested')
if self.opts['print_metadata']:
print_ret = ret
out = 'nested'
else:
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
out=out,
opts=self.opts,
_retcode=ret.get('retcode', 0))
# _retcode will be available in the kwargs of the outputter function
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
elif ret['retcode'] != salt.defaults.exitcodes.EX_OK:
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except SaltInvocationError as err:
raise SystemExit(err) | python | def run(self):
'''
Execute the salt call logic
'''
profiling_enabled = self.opts.get('profiling_enabled', False)
try:
pr = salt.utils.profile.activate_profile(profiling_enabled)
try:
ret = self.call()
finally:
salt.utils.profile.output_profile(
pr,
stats_path=self.opts.get('profiling_path', '/tmp/stats'),
stop=True)
out = ret.get('out', 'nested')
if self.opts['print_metadata']:
print_ret = ret
out = 'nested'
else:
print_ret = ret.get('return', {})
salt.output.display_output(
{'local': print_ret},
out=out,
opts=self.opts,
_retcode=ret.get('retcode', 0))
# _retcode will be available in the kwargs of the outputter function
if self.opts.get('retcode_passthrough', False):
sys.exit(ret['retcode'])
elif ret['retcode'] != salt.defaults.exitcodes.EX_OK:
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except SaltInvocationError as err:
raise SystemExit(err) | [
"def",
"run",
"(",
"self",
")",
":",
"profiling_enabled",
"=",
"self",
".",
"opts",
".",
"get",
"(",
"'profiling_enabled'",
",",
"False",
")",
"try",
":",
"pr",
"=",
"salt",
".",
"utils",
".",
"profile",
".",
"activate_profile",
"(",
"profiling_enabled",
... | Execute the salt call logic | [
"Execute",
"the",
"salt",
"call",
"logic"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/caller.py#L111-L142 | train |
saltstack/salt | salt/cli/caller.py | BaseCaller.call | def call(self):
'''
Call the module
'''
ret = {}
fun = self.opts['fun']
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
if fun not in self.minion.functions:
docs = self.minion.functions['sys.doc']('{0}*'.format(fun))
if docs:
docs[fun] = self.minion.functions.missing_fun_string(fun)
ret['out'] = 'nested'
ret['return'] = docs
return ret
sys.stderr.write(self.minion.functions.missing_fun_string(fun))
mod_name = fun.split('.')[0]
if mod_name in self.minion.function_errors:
sys.stderr.write(' Possible reasons: {0}\n'.format(self.minion.function_errors[mod_name]))
else:
sys.stderr.write('\n')
sys.exit(-1)
metadata = self.opts.get('metadata')
if metadata is not None:
metadata = salt.utils.args.yamlify_arg(metadata)
try:
sdata = {
'fun': fun,
'pid': os.getpid(),
'jid': ret['jid'],
'tgt': 'salt-call'}
if metadata is not None:
sdata['metadata'] = metadata
args, kwargs = salt.minion.load_args_and_kwargs(
self.minion.functions[fun],
salt.utils.args.parse_input(
self.opts['arg'],
no_parse=self.opts.get('no_parse', [])),
data=sdata)
try:
with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
fp_.write(self.serial.dumps(sdata))
except NameError:
# Don't require msgpack with local
pass
except IOError:
sys.stderr.write(
'Cannot write to process directory. '
'Do you have permissions to '
'write to {0} ?\n'.format(proc_fn))
func = self.minion.functions[fun]
data = {
'arg': args,
'fun': fun
}
data.update(kwargs)
executors = getattr(self.minion, 'module_executors', []) or \
salt.utils.args.yamlify_arg(
self.opts.get('module_executors', '[direct_call]')
)
if self.opts.get('executor_opts', None):
data['executor_opts'] = salt.utils.args.yamlify_arg(
self.opts['executor_opts']
)
if isinstance(executors, six.string_types):
executors = [executors]
try:
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in self.minion.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
ret['return'] = self.minion.executors[fname](self.opts, data, func, args, kwargs)
if ret['return'] is not None:
break
except TypeError as exc:
sys.stderr.write('\nPassed invalid arguments: {0}.\n\nUsage:\n'.format(exc))
salt.utils.stringutils.print_cli(func.__doc__)
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
trace = traceback.format_exc()
sys.stderr.write(trace)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
try:
retcode = sys.modules[
func.__module__].__context__.get('retcode', 0)
except AttributeError:
retcode = salt.defaults.exitcodes.EX_GENERIC
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
except (CommandExecutionError) as exc:
msg = 'Error running \'{0}\': {1}\n'
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
sys.stderr.write(traceback.format_exc())
sys.stderr.write(msg.format(fun, exc))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found: {1}\n'
sys.stderr.write(msg.format(fun, exc))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
try:
os.remove(proc_fn)
except (IOError, OSError):
pass
if hasattr(self.minion.functions[fun], '__outputter__'):
oput = self.minion.functions[fun].__outputter__
if isinstance(oput, six.string_types):
ret['out'] = oput
is_local = self.opts['local'] or self.opts.get(
'file_client', False) == 'local' or self.opts.get(
'master_type') == 'disable'
returners = self.opts.get('return', '').split(',')
if (not is_local) or returners:
ret['id'] = self.opts['id']
ret['fun'] = fun
ret['fun_args'] = self.opts['arg']
if metadata is not None:
ret['metadata'] = metadata
for returner in returners:
if not returner: # if we got an empty returner somehow, skip
continue
try:
ret['success'] = True
self.minion.returners['{0}.returner'.format(returner)](ret)
except Exception:
pass
# return the job infos back up to the respective minion's master
if not is_local:
try:
mret = ret.copy()
mret['jid'] = 'req'
self.return_pub(mret)
except Exception:
pass
elif self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
return ret | python | def call(self):
'''
Call the module
'''
ret = {}
fun = self.opts['fun']
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
if fun not in self.minion.functions:
docs = self.minion.functions['sys.doc']('{0}*'.format(fun))
if docs:
docs[fun] = self.minion.functions.missing_fun_string(fun)
ret['out'] = 'nested'
ret['return'] = docs
return ret
sys.stderr.write(self.minion.functions.missing_fun_string(fun))
mod_name = fun.split('.')[0]
if mod_name in self.minion.function_errors:
sys.stderr.write(' Possible reasons: {0}\n'.format(self.minion.function_errors[mod_name]))
else:
sys.stderr.write('\n')
sys.exit(-1)
metadata = self.opts.get('metadata')
if metadata is not None:
metadata = salt.utils.args.yamlify_arg(metadata)
try:
sdata = {
'fun': fun,
'pid': os.getpid(),
'jid': ret['jid'],
'tgt': 'salt-call'}
if metadata is not None:
sdata['metadata'] = metadata
args, kwargs = salt.minion.load_args_and_kwargs(
self.minion.functions[fun],
salt.utils.args.parse_input(
self.opts['arg'],
no_parse=self.opts.get('no_parse', [])),
data=sdata)
try:
with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
fp_.write(self.serial.dumps(sdata))
except NameError:
# Don't require msgpack with local
pass
except IOError:
sys.stderr.write(
'Cannot write to process directory. '
'Do you have permissions to '
'write to {0} ?\n'.format(proc_fn))
func = self.minion.functions[fun]
data = {
'arg': args,
'fun': fun
}
data.update(kwargs)
executors = getattr(self.minion, 'module_executors', []) or \
salt.utils.args.yamlify_arg(
self.opts.get('module_executors', '[direct_call]')
)
if self.opts.get('executor_opts', None):
data['executor_opts'] = salt.utils.args.yamlify_arg(
self.opts['executor_opts']
)
if isinstance(executors, six.string_types):
executors = [executors]
try:
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in self.minion.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
ret['return'] = self.minion.executors[fname](self.opts, data, func, args, kwargs)
if ret['return'] is not None:
break
except TypeError as exc:
sys.stderr.write('\nPassed invalid arguments: {0}.\n\nUsage:\n'.format(exc))
salt.utils.stringutils.print_cli(func.__doc__)
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
trace = traceback.format_exc()
sys.stderr.write(trace)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
try:
retcode = sys.modules[
func.__module__].__context__.get('retcode', 0)
except AttributeError:
retcode = salt.defaults.exitcodes.EX_GENERIC
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
except (CommandExecutionError) as exc:
msg = 'Error running \'{0}\': {1}\n'
active_level = LOG_LEVELS.get(
self.opts['log_level'].lower(), logging.ERROR)
if active_level <= logging.DEBUG:
sys.stderr.write(traceback.format_exc())
sys.stderr.write(msg.format(fun, exc))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found: {1}\n'
sys.stderr.write(msg.format(fun, exc))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
try:
os.remove(proc_fn)
except (IOError, OSError):
pass
if hasattr(self.minion.functions[fun], '__outputter__'):
oput = self.minion.functions[fun].__outputter__
if isinstance(oput, six.string_types):
ret['out'] = oput
is_local = self.opts['local'] or self.opts.get(
'file_client', False) == 'local' or self.opts.get(
'master_type') == 'disable'
returners = self.opts.get('return', '').split(',')
if (not is_local) or returners:
ret['id'] = self.opts['id']
ret['fun'] = fun
ret['fun_args'] = self.opts['arg']
if metadata is not None:
ret['metadata'] = metadata
for returner in returners:
if not returner: # if we got an empty returner somehow, skip
continue
try:
ret['success'] = True
self.minion.returners['{0}.returner'.format(returner)](ret)
except Exception:
pass
# return the job infos back up to the respective minion's master
if not is_local:
try:
mret = ret.copy()
mret['jid'] = 'req'
self.return_pub(mret)
except Exception:
pass
elif self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
return ret | [
"def",
"call",
"(",
"self",
")",
":",
"ret",
"=",
"{",
"}",
"fun",
"=",
"self",
".",
"opts",
"[",
"'fun'",
"]",
"ret",
"[",
"'jid'",
"]",
"=",
"salt",
".",
"utils",
".",
"jid",
".",
"gen_jid",
"(",
"self",
".",
"opts",
")",
"proc_fn",
"=",
"o... | Call the module | [
"Call",
"the",
"module"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/caller.py#L144-L301 | train |
saltstack/salt | salt/cli/caller.py | ZeroMQCaller.return_pub | def return_pub(self, ret):
'''
Return the data up to the master
'''
channel = salt.transport.client.ReqChannel.factory(self.opts, usage='salt_call')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
try:
channel.send(load)
finally:
channel.close() | python | def return_pub(self, ret):
'''
Return the data up to the master
'''
channel = salt.transport.client.ReqChannel.factory(self.opts, usage='salt_call')
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
try:
channel.send(load)
finally:
channel.close() | [
"def",
"return_pub",
"(",
"self",
",",
"ret",
")",
":",
"channel",
"=",
"salt",
".",
"transport",
".",
"client",
".",
"ReqChannel",
".",
"factory",
"(",
"self",
".",
"opts",
",",
"usage",
"=",
"'salt_call'",
")",
"load",
"=",
"{",
"'cmd'",
":",
"'_re... | Return the data up to the master | [
"Return",
"the",
"data",
"up",
"to",
"the",
"master"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/caller.py#L308-L319 | train |
saltstack/salt | salt/thorium/local.py | cmd | def cmd(name,
tgt,
func,
arg=(),
tgt_type='glob',
ret='',
kwarg=None,
**kwargs):
'''
Execute a remote execution command
USAGE:
.. code-block:: yaml
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.sleep
- arg:
- 30
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.sleep
- kwarg:
length: 30
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
local = salt.client.get_local_client(mopts=__opts__)
jid = local.cmd_async(tgt,
func,
arg,
tgt_type=tgt_type,
ret=ret,
kwarg=kwarg,
**kwargs)
ret['changes']['jid'] = jid
return ret | python | def cmd(name,
tgt,
func,
arg=(),
tgt_type='glob',
ret='',
kwarg=None,
**kwargs):
'''
Execute a remote execution command
USAGE:
.. code-block:: yaml
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.sleep
- arg:
- 30
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.sleep
- kwarg:
length: 30
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
local = salt.client.get_local_client(mopts=__opts__)
jid = local.cmd_async(tgt,
func,
arg,
tgt_type=tgt_type,
ret=ret,
kwarg=kwarg,
**kwargs)
ret['changes']['jid'] = jid
return ret | [
"def",
"cmd",
"(",
"name",
",",
"tgt",
",",
"func",
",",
"arg",
"=",
"(",
")",
",",
"tgt_type",
"=",
"'glob'",
",",
"ret",
"=",
"''",
",",
"kwarg",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
... | Execute a remote execution command
USAGE:
.. code-block:: yaml
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.sleep
- arg:
- 30
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.sleep
- kwarg:
length: 30 | [
"Execute",
"a",
"remote",
"execution",
"command"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/local.py#L12-L59 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.