INSTRUCTION
stringlengths 1
46.3k
| RESPONSE
stringlengths 75
80.2k
|
|---|---|
Wraps function fn in a try catch block that re-raises error_class.
Args:
fn (function): function to be wrapped
error_class (Exception): Error class to be re-raised
Returns:
(object): fn wrapped in a try catch.
|
def error_wrapper(fn, error_class):  # type: (Callable or None, Exception) -> ...
    """Wrap *fn* in a try/except that re-raises any failure as *error_class*.

    Args:
        fn (function): function to be wrapped
        error_class (Exception): Error class to be re-raised

    Returns:
        (object): fn wrapped in a try/except.
    """
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as exc:
            # re-raise as error_class while preserving the original traceback
            six.reraise(error_class, error_class(exc), sys.exc_info()[2])
    return wrapped
|
A helper callback to be executed after the connection is made to ensure
that Ceph is installed.
|
def ceph_is_installed(module):
    """
    A helper callback to be executed after the connection is made to ensure
    that Ceph is installed; raises RuntimeError naming the host when it is not.
    """
    if not Ceph(module.conn).installed:
        raise RuntimeError(
            'ceph needs to be installed in remote host: %s' % module.conn.hostname
        )
|
Ignoring errors, call `ceph --version` and return only the version
portion of the output. For example, output like::
ceph version 9.0.1-1234kjd (asdflkj2k3jh234jhg)
Would return::
9.0.1-1234kjd
|
def _get_version_output(self):
"""
Ignoring errors, call `ceph --version` and return only the version
portion of the output. For example, output like::
ceph version 9.0.1-1234kjd (asdflkj2k3jh234jhg)
Would return::
9.0.1-1234kjd
"""
if not self.executable:
return ''
command = [self.executable, '--version']
out, _, _ = self._check(self.conn, command)
try:
return out.decode('utf-8').split()[2]
except IndexError:
return ''
|
Returns True if the running system's terminal supports color, and False
otherwise.
|
def supports_color():
    """
    Return ``True`` when the running system's terminal supports color,
    ``False`` otherwise.
    """
    if sys.platform in ('win32', 'Pocket PC'):
        return False
    # isatty is not always implemented, #6223.
    if not hasattr(sys.stdout, 'isatty'):
        return False
    return True if sys.stdout.isatty() else False
|
Main entry point to get a colored formatter, it will use the
BASE_FORMAT by default and fall back to no colors if the system
does not support it
|
def color_format():
    """
    Main entry point to get a colored formatter; uses BASE_COLOR_FORMAT
    by default and falls back to BASE_FORMAT when the system does not
    support color.
    """
    if supports_color():
        fmt = BASE_COLOR_FORMAT
    else:
        fmt = BASE_FORMAT
    return ColoredFormatter(color_message(fmt))
|
A direct check for JSON output on the monitor status.
For newer versions of Ceph (dumpling and newer) a new mon_status command
was added ( `ceph daemon mon mon_status` ) and should be revisited if the
output changes as this check depends on that availability.
|
def mon_status_check(conn, logger, hostname, args):
    """
    A direct check for JSON output on the monitor status.

    For newer versions of Ceph (dumpling and newer) a new mon_status command
    was added ( `ceph daemon mon mon_status` ) and should be revisited if the
    output changes as this check depends on that availability.

    Returns the decoded JSON as a dict, or an empty dict when the output
    cannot be parsed.
    """
    command = [
        'ceph',
        '--cluster={cluster}'.format(cluster=args.cluster),
        '--admin-daemon',
        paths.mon.asok(args.cluster, hostname),
        'mon_status',
    ]
    out, err, code = remoto.process.check(conn, command)
    for line in err:
        logger.error(line)
    try:
        return json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        return {}
|
Make sure we are able to catch up common mishaps with monitors
and use that state of a monitor to determine what is missing
and warn appropriately about it.
|
def catch_mon_errors(conn, logger, hostname, cfg, args):
    """
    Make sure we are able to catch up common mishaps with monitors
    and use that state of a monitor to determine what is missing
    and warn appropriately about it.
    """
    monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {})
    mon_initial_members = get_mon_initial_members(args, _cfg=cfg)
    public_addr = cfg.safe_get('global', 'public_addr')
    public_network = cfg.safe_get('global', 'public_network')
    # names from the monmap that match this host
    mon_in_monmap = [
        mon.get('name')
        for mon in monmap.get('mons', [{}])
        if mon.get('name') == hostname
    ]
    # idiomatic `x not in y` (was the non-idiomatic `not x in y`)
    if mon_initial_members is None or hostname not in mon_initial_members:
        logger.warning('%s is not defined in `mon initial members`', hostname)
    if not mon_in_monmap:
        logger.warning('monitor %s does not exist in monmap', hostname)
    if not public_addr and not public_network:
        logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
        logger.warning('monitors may not be able to form quorum')
|
run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide
not only the output, but be able to return a boolean status of what is
going on.
``False`` represents a monitor that is not doing OK even if it is up and
running, while ``True`` would mean the monitor is up and running correctly.
|
def mon_status(conn, logger, hostname, args, silent=False):
    """
    run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide
    not only the output, but be able to return a boolean status of what is
    going on.

    ``False`` represents a monitor that is not doing OK even if it is up and
    running, while ``True`` would mean the monitor is up and running correctly.
    """
    mon = 'mon.%s' % hostname
    try:
        status = mon_status_check(conn, logger, hostname, args)
        if not status:
            logger.warning('monitor: %s, might not be running yet' % mon)
            return False

        if not silent:
            logger.debug('*'*80)
            logger.debug('status for monitor: %s' % mon)
            for line in json.dumps(status, indent=2, sort_keys=True).split('\n'):
                logger.debug(line)
            logger.debug('*'*80)

        if status['rank'] >= 0:
            logger.info('monitor: %s is running' % mon)
            return True
        # rank of -1 with a reported state still counts as a live monitor
        if status['rank'] == -1 and status['state']:
            logger.info('monitor: %s is currently at the state of %s' % (mon, status['state']))
            return True

        logger.info('monitor: %s is not running' % mon)
        return False
    except RuntimeError:
        logger.info('monitor: %s is not running' % mon)
        return False
|
This is a very, very, dumb parser that will look for `[entity]` sections
and return a list of those sections. It is not possible to parse this with
ConfigParser even though it is almost the same thing.
Since this is only used to spit out warnings, it is OK to just be naive
about the parsing.
|
def keyring_parser(path):
    """
    A very naive parser that collects every `[entity]` section header found
    in a keyring file and returns them as a list. It is not possible to
    parse these files with ConfigParser even though the format is almost
    the same. Since the result is only used to emit warnings, naive
    parsing is acceptable.
    """
    sections = []
    with open(path) as keyring:
        for raw_line in keyring.readlines():
            candidate = raw_line.strip('\n')
            if candidate.startswith('[') and candidate.endswith(']'):
                sections.append(candidate.strip('[]'))
    return sections
|
A helper to collect all keyrings into a single blob that will be
used to inject it to mons with ``--mkfs`` on remote nodes
We require all keyring files to be concatenated to be in a directory
to end with ``.keyring``.
|
def concatenate_keyrings(args):
    """
    A helper to collect all keyrings into a single blob that will be
    used to inject it to mons with ``--mkfs`` on remote nodes

    We require all keyring files to be concatenated to be in a directory
    to end with ``.keyring``.

    Raises RuntimeError when no ``.keyring`` files are found.
    """
    keyring_path = os.path.abspath(args.keyrings)
    LOG.info('concatenating keyrings from %s' % keyring_path)
    LOG.info('to seed remote monitors')

    # entries here are already absolute paths, no further normalization needed
    keyrings = [
        os.path.join(keyring_path, f) for f in os.listdir(keyring_path)
        if os.path.isfile(os.path.join(keyring_path, f)) and f.endswith('.keyring')
    ]

    contents = []
    seen_sections = {}

    if not keyrings:
        # keyring_path is already absolute; the original recomputed it here
        raise RuntimeError('could not find any keyrings in %s' % keyring_path)

    for path in keyrings:
        for section in keyring_parser(path):
            # NOTE(review): a file containing several new entities gets its
            # full contents appended once per entity, matching the original
            # behavior — confirm this is intended before changing it.
            if section not in seen_sections:
                seen_sections[section] = path
                LOG.info('adding entity "%s" from keyring %s' % (section, path))
                with open(path) as k:
                    contents.append(k.read())
            else:
                LOG.warning('will not add keyring: %s' % path)
                LOG.warning('entity "%s" from keyring %s is a duplicate' % (section, path))
                LOG.warning('already present in keyring: %s' % seen_sections[section])

    return ''.join(contents)
|
Make sure that the host that we are connecting to has the same value as the
`hostname` in the remote host, otherwise mons can fail not reaching quorum.
|
def hostname_is_compatible(conn, logger, provided_hostname):
    """
    Make sure that the host that we are connecting to has the same value as the
    `hostname` in the remote host, otherwise mons can fail not reaching quorum.
    Only warns; never raises.
    """
    logger.debug('determining if provided host has same hostname in remote')
    remote_hostname = conn.remote_module.shortname()
    if remote_hostname == provided_hostname:
        return
    banner = '*' * 80
    logger.warning(banner)
    logger.warning('provided hostname must match remote hostname')
    logger.warning('provided hostname: %s' % provided_hostname)
    logger.warning('remote hostname: %s' % remote_hostname)
    logger.warning('monitors may not reach quorum and create-keys will not complete')
    logger.warning(banner)
|
Read the Ceph config file and return the value of mon_initial_members
Optionally, a NeedHostError can be raised if the value is None.
|
def get_mon_initial_members(args, error_on_empty=False, _cfg=None):
    """
    Read the Ceph config file and return the value of mon_initial_members
    split on commas/whitespace. Optionally, a NeedHostError can be raised
    if the value is empty; otherwise the empty value is returned as-is.
    """
    cfg = _cfg if _cfg else conf.ceph.load(args)
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    if mon_initial_members:
        # split on any run of commas and/or whitespace
        return re.split(r'[,\s]+', mon_initial_members)
    if error_on_empty:
        raise exc.NeedHostError(
            'could not find `mon initial members` defined in ceph.conf'
        )
    return mon_initial_members
|
Run a command to check the status of a mon, return a boolean.
We heavily depend on the format of the output, if that ever changes
we need to modify this.
Check daemon status for 3 times
output of the status should be similar to::
mon.mira094: running {"version":"0.61.5"}
or when it fails::
mon.mira094: dead {"version":"0.61.5"}
mon.mira094: not running {"version":"0.61.5"}
|
def is_running(conn, args):
    """
    Run a command to check the status of a mon, return a boolean.

    We heavily depend on the format of the output; if that ever changes
    we need to modify this. Output of the status should be similar to::

        mon.mira094: running {"version":"0.61.5"}

    or when it fails::

        mon.mira094: dead {"version":"0.61.5"}
        mon.mira094: not running {"version":"0.61.5"}
    """
    stdout, stderr, _ = remoto.process.check(conn, args)
    output = b' '.join(stdout)
    return any(marker in output for marker in (b': running', b' start/running'))
|
Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found.
|
def executable_path(conn, executable):
    """
    Remote validator that accepts a connection object to ensure that a certain
    executable is available returning its full path if so.

    Otherwise an ExecutableNotFound exception (with the executable name and
    remote hostname) is raised.
    """
    found = conn.remote_module.which(executable)
    if found:
        return found
    raise ExecutableNotFound(executable, conn.hostname)
|
This helper should only be used as a fallback (last resort) as it is not
guaranteed that it will be absolutely correct.
|
def is_upstart(conn):
    """
    This helper should only be used as a fallback (last resort) as it is not
    guaranteed that it will be absolutely correct.
    """
    # it may be possible that we may be systemd and the caller never checked
    # before so lets do that
    if is_systemd(conn):
        return False

    # get the initctl executable, if it doesn't exist we can't proceed so we
    # are probably not upstart
    initctl = conn.remote_module.which('initctl')
    if not initctl:
        return False

    # finally, try and get output from initctl that might hint this is an
    # upstart system. On a Ubuntu 14.04.2 system this would look like:
    # $ initctl version
    # init (upstart 1.12.1)
    stdout, stderr, _ = remoto.process.check(
        conn,
        [initctl, 'version'],
    )
    return b'upstart' in b' '.join(stdout)
|
Enable a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
|
def enable_service(conn, service='ceph'):
    """
    Enable a service on a remote host depending on the type of init system.
    Obviously, this should be done for RHEL/Fedora/CentOS systems.

    This function does not do any kind of detection.
    """
    if is_systemd(conn):
        command = ['systemctl', 'enable', '{service}'.format(service=service)]
    else:
        # legacy sysvinit hosts use chkconfig
        command = ['chkconfig', '{service}'.format(service=service), 'on']
    remoto.process.run(conn, command)
|
Disable a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
|
def disable_service(conn, service='ceph'):
    """
    Disable a service on a remote host depending on the type of init system.
    Obviously, this should be done for RHEL/Fedora/CentOS systems.

    This function does not do any kind of detection. Non-systemd hosts are
    left untouched.
    """
    # Without the enabled check, `systemctl disable` raises an error when
    # the service is already disabled.
    if is_systemd(conn) and is_systemd_service_enabled(conn, service):
        remoto.process.run(
            conn,
            ['systemctl', 'disable', '{service}'.format(service=service)]
        )
|
Stop a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
|
def stop_service(conn, service='ceph'):
    """
    Stop a service on a remote host depending on the type of init system.
    Obviously, this should be done for RHEL/Fedora/CentOS systems.

    This function does not do any kind of detection. Non-systemd hosts are
    left untouched.
    """
    # Without the active check, `systemctl stop` raises an error when the
    # service is already stopped.
    if is_systemd(conn) and is_systemd_service_active(conn, service):
        remoto.process.run(
            conn,
            ['systemctl', 'stop', '{service}'.format(service=service)]
        )
|
Start a service on a remote host depending on the type of init system.
Obviously, this should be done for RHEL/Fedora/CentOS systems.
This function does not do any kind of detection.
|
def start_service(conn, service='ceph'):
    """
    Start a service on a remote host depending on the type of init system.
    Obviously, this should be done for RHEL/Fedora/CentOS systems.

    This function does not do any kind of detection.
    """
    # NOTE(review): the original docstring said "Stop" — copy/paste from
    # stop_service; the code clearly runs `systemctl start`.
    # Non-systemd hosts are left untouched.
    if is_systemd(conn):
        remoto.process.run(
            conn,
            [
                'systemctl',
                'start',
                '{service}'.format(service=service),
            ]
        )
|
Detects if a systemd service is enabled or not.
|
def is_systemd_service_enabled(conn, service='ceph'):
    """
    Detects if a systemd service is enabled or not.
    """
    command = [
        'systemctl',
        'is-enabled',
        '--quiet',
        '{service}'.format(service=service),
    ]
    _, _, returncode = remoto.process.check(conn, command)
    # `systemctl is-enabled` exits 0 only when the unit is enabled
    return returncode == 0
|
Repo definition management
|
def make(parser):
    """
    Repo definition management

    Registers the arguments for the `repo` subcommand on the given
    argparse parser and wires its dispatch function.
    """
    # positional: the repo to manage; may match a section in cephdeploy.conf
    parser.add_argument(
        'repo_name',
        metavar='REPO-NAME',
        help='Name of repo to manage. Can match an entry in cephdeploy.conf'
    )
    parser.add_argument(
        '--repo-url',
        help='a repo URL that mirrors/contains Ceph packages'
    )
    parser.add_argument(
        '--gpg-url',
        help='a GPG key URL to be used with custom repos'
    )
    # --delete is kept as an alias of --remove
    parser.add_argument(
        '--remove', '--delete',
        action='store_true',
        help='remove repo definition on remote host'
    )
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='host(s) to install on'
    )
    # dispatch to the `repo` callable when this subcommand is selected
    parser.set_defaults(
        func=repo
    )
|
Read the configuration file and look for ceph-deploy sections
to set flags/defaults from the values found. This will alter the
``args`` object that is created by argparse.
|
def set_overrides(args, _conf=None):
    """
    Read the configuration file and look for ceph-deploy sections
    to set flags/defaults from the values found. This will alter the
    ``args`` object that is created by argparse.
    """
    # Only the global section and the section matching the current
    # subcommand may override values; other subcommands' sections are
    # ignored so their settings never leak in.
    command_section = 'ceph-deploy-%s' % args.func.__name__
    cfg = _conf or load()
    relevant_sections = ('ceph-deploy-global', command_section)
    for section_name in cfg.sections():
        if section_name in relevant_sections:
            override_subcommand(
                section_name,
                cfg.items(section_name),
                args
            )
    return args
|
Given a specific section in the configuration file that maps to
a subcommand (except for the global section) read all the keys that are
actual argument flags and slap the values for that one subcommand.
Return the altered ``args`` object at the end.
|
def override_subcommand(section_name, section_items, args):
    """
    Given a specific section in the configuration file that maps to
    a subcommand (except for the global section) read all the keys that are
    actual argument flags and slap the values for that one subcommand.

    Return the altered ``args`` object at the end.
    """
    # XXX We are not coercing here any int-like values, so if ArgParse
    # does that in the CLI we are totally non-compliant with that expectation
    # but we will try and infer a few boolean values

    # acceptable boolean states for flags
    truth_table = {'yes': True, 'true': True, 'on': True,
                   'no': False, 'false': False, 'off': False}
    for key, raw_value in section_items:
        # lowercase string values so boolean-like flags ('Yes', 'TRUE', ...)
        # can be matched; non-strings fall through untouched
        if hasattr(raw_value, 'lower'):
            normalized = raw_value.lower()
        else:
            normalized = raw_value
        setattr(args, key, truth_table.get(normalized, raw_value))
    return args
|
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
|
def get_safe(self, section, key, default=None):
    """
    Attempt to get a configuration value from a certain section
    in a ``cfg`` object, returning ``default`` when the section or
    option is missing. Avoids sprinkling try/except for ConfigParser
    exceptions at every call site.
    """
    try:
        value = self.get(section, key)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default
    return value
|
boolean to reflect having (or not) any repository sections
|
def has_repos(self):
    """
    boolean to reflect having (or not) any repository sections
    (i.e. any section that is not in ``reserved_sections``)
    """
    return any(
        section not in self.reserved_sections
        for section in self.sections()
    )
|
Assumes that the value for a given key is going to be a list
separated by commas. It gets rid of trailing comments.
If just one item is present it returns a list with a single item, if no
key is found an empty list is returned.
|
def get_list(self, section, key):
    """
    Assumes that the value for a given key is going to be a list
    separated by commas. It gets rid of trailing comments.

    If just one item is present it returns a list with a single item, if no
    key is found an empty list is returned.
    """
    value = self.get_safe(section, key, [])
    if value == []:
        return value
    # drop a trailing `# comment`, split on commas, trim whitespace
    uncommented = re.split(r'\s+#', value)[0]
    return [item.strip() for item in uncommented.split(',')]
|
Go through all the repositories defined in the config file and search
for a truthy value for the ``default`` key. If there isn't any return
None.
|
def get_default_repo(self):
    """
    Go through all the repositories defined in the config file and
    return the first one with a truthy ``default`` key. When no repo
    is marked as default, ``False`` is returned.
    """
    for repo in self.get_repos():
        if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'):
            return repo
    return False
|
Make sure that, for a given host, every one of the specified subnets
contains at least one of the host's IPs.
|
def validate_host_ip(ips, subnets):
    """
    Make sure that, for every subnet specified, the given host has at
    least one IP inside that subnet's range. Raises RuntimeError on the
    first subnet with no matching IP.
    """
    # Make sure we prune ``None`` arguments
    wanted_subnets = [s for s in subnets if s is not None]

    def any_ip_in_subnet(subnet):
        """ ensure an ip exists in at least one subnet """
        return any(net.ip_in_subnet(ip, subnet) for ip in ips)

    # each subnet must be satisfied; bail out on the first one that is not
    for subnet in wanted_subnets:
        if not any_ip_in_subnet(subnet):
            raise RuntimeError(
                "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
            )
|
Given a public subnet, choose the one IP from the remote host that exists
within the subnet range.
|
def get_public_network_ip(ips, public_subnet):
    """
    Given a public subnet, choose the one IP from the remote host that exists
    within the subnet range. Raises RuntimeError when none match.
    """
    for candidate in ips:
        if net.ip_in_subnet(candidate, public_subnet):
            return candidate
    raise RuntimeError(
        "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
    )
|
Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
|
def make(parser):
    """
    Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.

    Registers the arguments for the `new` subcommand on the given
    argparse parser and wires its dispatch function.
    """
    # one or more initial monitors, validated by arg_validators.Hostname()
    parser.add_argument(
        'mon',
        metavar='MON',
        nargs='+',
        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
        type=arg_validators.Hostname(),
    )
    # opt-out flag: stores ssh_copykey=False when given
    parser.add_argument(
        '--no-ssh-copykey',
        dest='ssh_copykey',
        action='store_false',
        default=True,
        help='do not attempt to copy SSH keys',
    )
    parser.add_argument(
        '--fsid',
        dest='fsid',
        help='provide an alternate FSID for ceph.conf generation',
    )
    # subnet values are validated by arg_validators.Subnet()
    parser.add_argument(
        '--cluster-network',
        help='specify the (internal) cluster network',
        type=arg_validators.Subnet(),
    )
    parser.add_argument(
        '--public-network',
        help='specify the public network for a cluster',
        type=arg_validators.Subnet(),
    )
    # dispatch to the `new` callable when this subcommand is selected
    parser.set_defaults(
        func=new,
    )
|
Ceph MDS daemon management
|
def make(parser):
    """
    Ceph MDS daemon management

    Registers the `create` subcommand for MDS deployment on the given
    argparse parser and wires its dispatch function.
    """
    # `subcommand` holds the chosen action; only `create` is available
    mds_parser = parser.add_subparsers(dest='subcommand')
    mds_parser.required = True

    mds_create = mds_parser.add_parser(
        'create',
        help='Deploy Ceph MDS on remote host(s)'
    )
    # HOST[:NAME] pairs are split by the colon_separated type callable
    mds_create.add_argument(
        'mds',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    # dispatch to the `mds` callable when this subcommand is selected
    parser.set_defaults(
        func=mds,
    )
|
Select an init system
Returns the name of an init system (upstart, sysvinit ...).
|
def choose_init(module):
    """
    Select an init system

    Returns the name of an init system (upstart, sysvinit ...).
    """
    # releases before 7 only ship sysvinit
    if module.normalized_release.int_major < 7:
        return 'sysvinit'
    # no ceph systemd unit on the remote: fall back to sysvinit
    if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"):
        return 'sysvinit'
    # NOTE(review): the original also ran is_systemd(module.conn) here, but
    # both outcomes returned 'systemd', so that remote check was dead code
    # and has been removed.
    return 'systemd'
|
EPEL started packaging Ceph so we need to make sure that the ceph.repo we
install has a higher priority than the EPEL repo so that when installing
Ceph it will come from the repo file we create.
The name of the package changed back and forth (!) since CentOS 4:
From the CentOS wiki::
Note: This plugin has carried at least two differing names over time.
It is named yum-priorities on CentOS-5 but was named
yum-plugin-priorities on CentOS-4. CentOS-6 has reverted to
yum-plugin-priorities.
:params _yum: Used for testing, so we can inject a fake yum
|
def install_yum_priorities(distro, _yum=None):
    """
    EPEL started packaging Ceph so we need to make sure that the ceph.repo we
    install has a higher priority than the EPEL repo so that when installing
    Ceph it will come from the repo file we create.

    The name of the package changed back and forth (!) since CentOS 4.
    From the CentOS wiki::

        Note: This plugin has carried at least two differing names over time.
        It is named yum-priorities on CentOS-5 but was named
        yum-plugin-priorities on CentOS-4. CentOS-6 has reverted to
        yum-plugin-priorities.

    :params _yum: Used for testing, so we can inject a fake yum
    """
    yum = _yum or pkg_managers.yum
    if distro.normalized_name == 'centos' and distro.release[0] != '6':
        package_name = 'yum-priorities'
    else:
        package_name = 'yum-plugin-priorities'
    yum(distro.conn, package_name)
|
Very simple decorator that tries any of the exception(s) passed in as
a single exception class or tuple (containing multiple ones) returning the
exception message and optionally handling the problem if it raises with the
handler if it is provided.
So instead of doing something like this::
def bar():
try:
some_call()
print "Success!"
except TypeError, exc:
print "Error while handling some call: %s" % exc
sys.exit(1)
You would need to decorate it like this to have the same effect::
@catches(TypeError)
def bar():
some_call()
print "Success!"
If multiple exceptions need to be caught they need to be provided as a
tuple::
@catches((TypeError, AttributeError))
def bar():
some_call()
print "Success!"
If adding a handler, it should accept a single argument, which would be the
exception that was raised, it would look like::
def my_handler(exc):
print 'Handling exception %s' % str(exc)
raise SystemExit
@catches(KeyboardInterrupt, handler=my_handler)
def bar():
some_call()
Note that the handler needs to raise its SystemExit if it wants to halt
execution, otherwise the decorator would continue as a normal try/except
block.
:param catch: A tuple with one (or more) Exceptions to catch
:param handler: Optional handler to have custom handling of exceptions
:param exit: Raise a ``SystemExit`` after handling exceptions
:param handle_all: Handle all other exceptions via logging.
|
def catches(catch=None, handler=None, exit=True, handle_all=False):
    """
    Very simple decorator that tries any of the exception(s) passed in as
    a single exception class or tuple (containing multiple ones) returning the
    exception message and optionally handling the problem if it raises with the
    handler if it is provided.

    So instead of doing something like this::

        def bar():
            try:
                some_call()
                print "Success!"
            except TypeError, exc:
                print "Error while handling some call: %s" % exc
                sys.exit(1)

    You would need to decorate it like this to have the same effect::

        @catches(TypeError)
        def bar():
            some_call()
            print "Success!"

    If multiple exceptions need to be caught they need to be provided as a
    tuple::

        @catches((TypeError, AttributeError))
        def bar():
            some_call()
            print "Success!"

    If adding a handler, it should accept a single argument, which would be the
    exception that was raised, it would look like::

        def my_handler(exc):
            print 'Handling exception %s' % str(exc)
            raise SystemExit

        @catches(KeyboardInterrupt, handler=my_handler)
        def bar():
            some_call()

    Note that the handler needs to raise its SystemExit if it wants to halt
    execution, otherwise the decorator would continue as a normal try/except
    block.

    :param catch: A tuple with one (or more) Exceptions to catch
    :param handler: Optional handler to have custom handling of exceptions
    :param exit: Raise a ``SystemExit`` after handling exceptions
    :param handle_all: Handle all other exceptions via logging.
    """
    catch = catch or Exception
    logger = logging.getLogger('ceph_deploy')

    def decorate(f):

        @wraps(f)
        def newfunc(*a, **kw):
            exited_from_catch = False
            try:
                return f(*a, **kw)
            except catch as e:
                if handler:
                    return handler(e)
                logger.error(make_exception_message(e))
                if exit:
                    exited_from_catch = True
                    sys.exit(1)
            except Exception:  # anything else, no need to keep the exception around
                # re-raise when we are not supposed to handle everything
                if handle_all is False:
                    raise
                # Make sure we don't spit double tracebacks if we are raising
                # SystemExit from the `except catch` block
                if exited_from_catch:
                    sys.exit(1)
                for line in traceback.format_exc().split('\n'):
                    logger.error("%s" % line)
                sys.exit(1)
        return newfunc
    return decorate
|
An exception is passed in and this function
returns the proper string depending on the result
so it is readable enough.
|
def make_exception_message(exc):
    """
    An exception is passed in and this function returns a short, readable
    one-line message (with trailing newline); the exception text is included
    only when the exception actually carries one.
    """
    name = exc.__class__.__name__
    text = str(exc)
    return '%s: %s\n' % (name, text) if text else '%s\n' % name
|
detect platform information from remote host
|
def platform_information(_linux_distribution=None):
    """ detect platform information from remote host """
    distro_fn = _linux_distribution or platform.linux_distribution
    distro, release, codename = distro_fn()
    if not distro:
        distro, release, codename = parse_os_release()
    lowered = distro.lower()

    if not codename and 'debian' in lowered:  # this could be an empty string in Debian
        debian_codenames = {
            '10': 'buster',
            '9': 'stretch',
            '8': 'jessie',
            '7': 'wheezy',
            '6': 'squeeze',
        }
        codename = debian_codenames.get(release.split('.')[0], '')

        # In order to support newer jessie/sid or wheezy/sid strings we test this
        # if sid is buried in the minor, we should use sid anyway.
        if not codename and '/' in release:
            major, minor = release.split('/')
            codename = minor if minor == 'sid' else major

    if not codename and 'oracle' in lowered:  # this could be an empty string in Oracle linux
        codename = 'oracle'
    if not codename and 'virtuozzo linux' in lowered:  # this could be an empty string in Virtuozzo linux
        codename = 'virtuozzo'
    if not codename and 'arch' in lowered:  # this could be an empty string in Arch linux
        codename = 'arch'

    return (
        str(distro).rstrip(),
        str(release).rstrip(),
        str(codename).rstrip()
    )
|
Extract (distro, release, codename) from /etc/os-release if present
|
def parse_os_release(release_path='/etc/os-release'):
    """ Extract (distro, release, codename) from /etc/os-release if present.

    Returns a tuple of empty strings when the file is missing or lacks the
    relevant keys.
    """
    release_info = {}
    if os.path.isfile(release_path):
        # context manager so the file handle is always closed (the original
        # iterated a bare open() and leaked the handle)
        with open(release_path, 'r') as f:
            for line in f:
                line = line.strip()
                if line.startswith('#'):
                    continue
                parts = line.split('=')
                if len(parts) != 2:
                    continue
                release_info[parts[0].strip()] = parts[1].strip("\"'\n\t ")
    # In theory, we want ID/NAME, VERSION_ID and VERSION_CODENAME (with a
    # possible fallback to VERSION on the latter), based on information at:
    # https://www.freedesktop.org/software/systemd/man/os-release.html
    # However, after reviewing several distros /etc/os-release, getting
    # the codename is a bit of a mess. It's usually in parentheses in
    # VERSION, with some exceptions.
    distro = release_info.get('ID', '')
    release = release_info.get('VERSION_ID', '')
    codename = release_info.get('UBUNTU_CODENAME', release_info.get('VERSION', ''))
    match = re.match(r'^[^(]+ \(([^)]+)\)', codename)
    if match:
        codename = match.group(1).lower()
    if not codename and release_info.get('NAME', '') == 'openSUSE Tumbleweed':
        codename = 'tumbleweed'
    return (distro, release, codename)
|
add deb repo to /etc/apt/sources.list.d/
|
def write_sources_list(url, codename, filename='ceph.list', mode=0o644):
    """add deb repo to /etc/apt/sources.list.d/"""
    content = 'deb {url} {codename} main\n'.format(url=url, codename=codename)
    write_file(
        os.path.join('/etc/apt/sources.list.d', filename),
        content.encode('utf-8'),
        mode,
    )
|
add deb repo to /etc/apt/sources.list.d/ from content
|
def write_sources_list_content(content, filename='ceph.list', mode=0o644):
    """add deb repo to /etc/apt/sources.list.d/ from content"""
    # normalize bytes input to text, then encode once for writing
    if isinstance(content, str):
        text = content
    else:
        text = content.decode('utf-8')
    write_file(
        os.path.join('/etc/apt/sources.list.d', filename),
        text.encode('utf-8'),
        mode,
    )
|
add yum repo file in /etc/yum.repos.d/
|
def write_yum_repo(content, filename='ceph.repo'):
    """add yum repo file in /etc/yum.repos.d/"""
    # normalize bytes input to text, then encode once for writing
    if isinstance(content, str):
        text = content
    else:
        text = content.decode('utf-8')
    write_file(
        os.path.join('/etc/yum.repos.d', filename),
        text.encode('utf-8'),
    )
|
write cluster configuration to /etc/ceph/{cluster}.conf
|
def write_conf(cluster, conf, overwrite):
    """Write cluster configuration to /etc/ceph/{cluster}.conf.

    When the file already exists, its content is compared first; writing is
    then done via a temp file in the same directory plus a rename so readers
    never observe a partial config.

    :param cluster: cluster name, used to build the target filename
    :param conf: the full configuration text to write
    :param overwrite: replace an existing file even when contents differ
    :raises RuntimeError: if the file exists with different content and
        ``overwrite`` is False, or if /etc/ceph does not exist
    """
    path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
    err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path
    if os.path.exists(path):
        with open(path, 'r') as f:
            old = f.read()
        if old != conf and not overwrite:
            raise RuntimeError(err_msg)
        # Only create the temp file once we know we are actually writing;
        # creating it up-front leaked an orphan file on every error path
        # and raised a confusing OSError when /etc/ceph was missing.
        tmp_file = tempfile.NamedTemporaryFile('w', dir='/etc/ceph', delete=False)
        try:
            tmp_file.write(conf)
        finally:
            tmp_file.close()
        shutil.move(tmp_file.name, path)
        os.chmod(path, 0o644)
        return
    if os.path.exists('/etc/ceph'):
        with open(path, 'w') as f:
            f.write(conf)
        os.chmod(path, 0o644)
    else:
        err_msg = '/etc/ceph/ does not exist - could not write config'
        raise RuntimeError(err_msg)
|
create a keyring file
|
def write_keyring(path, key, uid=-1, gid=-1):
    """Create a keyring file at ``path`` containing ``key``.

    The key is written to a temp file first and then moved into place.
    ``delete=False`` is required so the temp file survives close() and can
    be moved, possibly across file systems.
    """
    tmp_file = tempfile.NamedTemporaryFile('wb', delete=False)
    tmp_file.write(key)
    tmp_file.close()
    parent_dir = os.path.dirname(path)
    if not path_exists(parent_dir):
        makedir(parent_dir, uid, gid)
    shutil.move(tmp_file.name, path)
|
create the mon path if it does not exist
|
def create_mon_path(path, uid=-1, gid=-1):
    """Create the monitor data directory (and chown it) unless it exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
    # -1 for uid/gid leaves the corresponding ownership unchanged
    os.chown(path, uid, gid)
|
create a done file to avoid re-doing the mon deployment
|
def create_done_path(done_path, uid=-1, gid=-1):
    """Touch the 'done' marker file so the mon deployment is not repeated."""
    open(done_path, 'wb').close()
    os.chown(done_path, uid, gid)
|
create the init path if it does not exist
|
def create_init_path(init_path, uid=-1, gid=-1):
    """Touch the init marker file if it is missing, then chown it.

    Existing files are left untouched (ownership included).
    """
    if os.path.exists(init_path):
        return
    open(init_path, 'wb').close()
    os.chown(init_path, uid, gid)
|
create the monitor keyring file
|
def write_monitor_keyring(keyring, monitor_keyring, uid=-1, gid=-1):
    """Create the monitor keyring file at path ``keyring``.

    Thin wrapper over ``write_file`` with mode 0o600 so the secret is only
    readable by its owner.  The uid/gid of -1 presumably leave ownership
    unchanged (chown semantics) — confirm against write_file's definition.
    """
    write_file(keyring, monitor_keyring, 0o600, None, uid, gid)
|
find the location of an executable
|
def which(executable):
    """Search a fixed list of system directories for ``executable``.

    Returns the absolute path of the first match, or ``None`` when the
    executable is not present in any of the well-known locations.
    """
    for directory in ('/usr/local/bin', '/bin', '/usr/bin',
                      '/usr/local/sbin', '/usr/sbin', '/sbin'):
        candidate = os.path.join(directory, executable)
        # isfile() is False for missing paths, so one check suffices
        if os.path.isfile(candidate):
            return candidate
|
move old monitor data
|
def make_mon_removed_dir(path, file_name):
    """Archive old monitor data under /var/lib/ceph/mon-removed/."""
    try:
        os.makedirs('/var/lib/ceph/mon-removed')
    except OSError as err:
        # an existing archive directory is fine; anything else is fatal
        if err.errno != errno.EEXIST:
            raise
    shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name))
|
create path if it doesn't exist
|
def safe_mkdir(path, uid=-1, gid=-1):
    """Create ``path`` (non-recursively) if missing, then chown it.

    An already-existing directory is left untouched; other OS errors
    (e.g. a missing parent) propagate.
    """
    try:
        os.mkdir(path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
    else:
        os.chown(path, uid, gid)
|
create path recursively if it doesn't exist
|
def safe_makedirs(path, uid=-1, gid=-1):
    """Create ``path`` recursively if missing, then chown the leaf."""
    try:
        os.makedirs(path)
    except OSError as err:
        # tolerate an already-existing target only
        if err.errno != errno.EEXIST:
            raise
    else:
        os.chown(path, uid, gid)
|
zeroing last few blocks of device
|
def zeroing(dev):
    """Zero the last few blocks of device ``dev``.

    sgdisk wipes the main copy of the GPT partition table but leaves the
    backup copies at the end of the device, which makes subsequent tools
    complain and fail.  Zeroing the final 33 LBAs clears those backups.

    Note: the previous version had a stray ``return True`` before the
    write, which made the whole wipe unreachable dead code.

    :param dev: path to the block device to wipe
    :raises OSError: if the device cannot be opened or seeked
    """
    lba_size = 4096
    size = 33 * lba_size
    with open(dev, 'wb') as f:
        # position at 33 LBAs before the end, then overwrite with zeros
        f.seek(-size, os.SEEK_END)
        f.write(size * b'\0')
|
Configure Yum priorities to include obsoletes
|
def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"):
    """Turn on ``check_obsoletes`` in the Yum priorities plugin config.

    Reads the existing config at ``path``, sets [main] check_obsoletes=1,
    and writes the file back in place.
    """
    parser = configparser.ConfigParser()
    parser.read(path)
    parser.set('main', 'check_obsoletes', '1')
    with open(path, 'w') as fout:
        parser.write(fout)
|
Copy ceph.conf to/from remote host(s)
|
def make(parser):
    """
    Copy ceph.conf to/from remote host(s)
    """
    sub = parser.add_subparsers(dest='subcommand')
    sub.required = True

    push = sub.add_parser(
        'push',
        help='push Ceph config file to one or more remote hosts'
    )
    push.add_argument(
        'client',
        metavar='HOST',
        nargs='+',
        help='host(s) to push the config file to',
    )

    pull = sub.add_parser(
        'pull',
        help='pull Ceph config file from one or more remote hosts'
    )
    pull.add_argument(
        'client',
        metavar='HOST',
        nargs='+',
        help='host(s) to pull the config file from',
    )

    parser.set_defaults(func=config)
|
:param args: Will be used to infer the proper configuration name, or
if args.ceph_conf is passed in, that will take precedence
|
def load(args):
    """
    Open and parse the cluster configuration file.

    :param args: Will be used to infer the proper configuration name, or
                 if args.ceph_conf is passed in, that will take precedence
    :raises exc.ConfigError: when the file cannot be opened
    """
    path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster)
    try:
        conf_file = open(path)
    except IOError as e:
        raise exc.ConfigError(
            "%s; has `ceph-deploy new` been run in this directory?" % e
        )
    with contextlib.closing(conf_file):
        return parse(conf_file)
|
Read the actual file *as is* without parsing/modifying it
so that it can be written maintaining its same properties.
:param args: Will be used to infer the proper configuration name
:param path: alternatively, use a path for any configuration file loading
|
def load_raw(args):
    """
    Read the configuration file *as is* without parsing/modifying it,
    so it can be written back preserving its original properties.

    :param args: Will be used to infer the proper configuration name;
                 ``args.ceph_conf`` takes precedence when set
    :raises exc.ConfigError: when the file cannot be read
    """
    path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster)
    try:
        conf_file = open(path)
    except (IOError, OSError) as e:
        raise exc.ConfigError(
            "%s; has `ceph-deploy new` been run in this directory?" % e
        )
    with conf_file:
        return conf_file.read()
|
write cluster configuration to /etc/ceph/{cluster}.conf
|
def write_conf(cluster, conf, overwrite):
    """Write cluster configuration to /etc/ceph/{cluster}.conf.

    Writes to a pid-suffixed temp file in the same directory, fsyncs it,
    and renames it into place so readers never see a partial file.

    :param cluster: cluster name used to build the target filename
    :param conf: configuration text to write
    :param overwrite: replace an existing file even when contents differ
    :raises RuntimeError: if the file exists with different content and
        ``overwrite`` is False
    """
    # NOTE: removed the redundant function-local `import os`; the module
    # already relies on a top-level os import everywhere else.
    path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
    tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid())
    if os.path.exists(path):
        with open(path) as f:
            old = f.read()
        if old != conf and not overwrite:
            raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path)
    with open(tmp, 'w') as f:
        f.write(conf)
        f.flush()
        # fsync conventionally takes a file descriptor; passing the file
        # object only works on Python 3's fildes converter
        os.fsync(f.fileno())
    os.rename(tmp, path)
|
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
|
def safe_get(self, section, key):
    """
    Fetch ``key`` from ``section``, returning None when either is missing
    instead of raising the usual ConfigParser exceptions.
    """
    try:
        # Call the parent implementation directly so a subclass that
        # overrides get() cannot change this lookup.
        return configparser.RawConfigParser.get(self, section, key)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return None
|
Repo files need special care in that a whole line should not be present
if there is no value for it. Because we were using `format()` we could
not conditionally add a line for a repo file. So the end result would
contain a key with a missing value (say if we were passing `None`).
For example, it could look like::
[ceph repo]
name= ceph repo
proxy=
gpgcheck=
Which breaks. This function allows us to conditionally add lines,
preserving an order and be more careful.
Previously, and for historical purposes, this is how the template used
to look::
custom_repo =
[{repo_name}]
name={name}
baseurl={baseurl}
enabled={enabled}
gpgcheck={gpgcheck}
type={_type}
gpgkey={gpgkey}
proxy={proxy}
|
def custom_repo(**kw):
    """
    Build the body of a yum repo file line by line, emitting a line only
    when its value was actually provided (not None, not empty string).

    Using a plain ``format()`` template could not skip lines, producing
    broken entries like ``proxy=`` with nothing after the equals sign.
    The field order below is preserved, starting with the ``[repo name]``
    header.
    """
    templates = (
        ('reponame', '[%s]'),
        ('name', 'name=%s'),
        ('baseurl', 'baseurl=%s'),
        ('enabled', 'enabled=%s'),
        ('gpgcheck', 'gpgcheck=%s'),
        ('_type', 'type=%s'),
        ('gpgkey', 'gpgkey=%s'),
        ('proxy', 'proxy=%s'),
        ('priority', 'priority=%s'),
    )
    # absent keys come back as None from kw.get(), so one membership test
    # on (None, '') covers both "missing" and "empty"
    rendered = [
        fmt % kw[field]
        for field, fmt in templates
        if kw.get(field) not in (None, '')
    ]
    return '\n'.join(rendered)
|
Ensure that vendored code/dirs are removed, possibly when packaging when
the environment flag is set to avoid vendoring.
|
def clean_vendor(name):
    """
    Remove a vendored library directory, e.g. when packaging with the
    environment flag set to avoid vendoring.
    """
    vendor_dest = path.join(
        path.dirname(path.abspath(__file__)),
        'ceph_deploy/lib/vendor/%s' % name,
    )
    run(['rm', '-rf', vendor_dest])
|
This is the main entry point for vendorizing requirements. It expects
a list of tuples that should contain the name of the library and the
version.
For example, a library ``foo`` with version ``0.0.1`` would look like::
vendor_requirements = [
('foo', '0.0.1'),
]
|
def vendorize(vendor_requirements):
    """
    Main entry point for vendorizing requirements.

    Expects a list of tuples with the library name and version, optionally
    followed by a command to run::

        vendor_requirements = [
            ('foo', '0.0.1'),
            ('bar', '1.2.0', 'make build'),
        ]

    :raises ValueError: for entries that are not 2- or 3-tuples (previously
        such entries silently reused stale values from a prior iteration)
    """
    for library in vendor_requirements:
        if len(library) == 2:
            name, version = library
            cmd = None
        elif len(library) == 3:  # a possible cmd we need to run
            name, version, cmd = library
        else:
            raise ValueError(
                'vendor requirement must be (name, version) or '
                '(name, version, cmd): %r' % (library,)
            )
        vendor_library(name, version, cmd)
|
Check two keyrings are identical
|
def _keyring_equivalent(keyring_one, keyring_two):
"""
Check two keyrings are identical
"""
def keyring_extract_key(file_path):
"""
Cephx keyring files may or may not have white space before some lines.
They may have some values in quotes, so a safe way to compare is to
extract the key.
"""
with open(file_path) as f:
for line in f:
content = line.strip()
if len(content) == 0:
continue
split_line = content.split('=')
if split_line[0].strip() == 'key':
return "=".join(split_line[1:]).strip()
raise RuntimeError("File '%s' is not a keyring" % file_path)
key_one = keyring_extract_key(keyring_one)
key_two = keyring_extract_key(keyring_two)
return key_one == key_two
|
Get the local filename for a keyring type
|
def keytype_path_to(args, keytype):
    """
    Map a keyring type to its local filename for the cluster.

    'admin' and 'mon' have dedicated names; everything else uses the
    bootstrap-<type> naming scheme.
    """
    special = {
        'admin': '{cluster}.client.admin.keyring',
        'mon': '{cluster}.mon.keyring',
    }
    template = special.get(keytype, '{cluster}.bootstrap-{what}.keyring')
    # extra kwargs are ignored by str.format when unused by the template
    return template.format(cluster=args.cluster, what=keytype)
|
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
|
def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
    """
    Get or create the keyring from the mon using the mon keyring by keytype and
    copy to dest_dir.

    :param args: parsed arguments; only ``args.cluster`` is read here
    :param distro: host module whose ``conn`` runs the remote commands
    :param rlogger: remote logger for error/debug output
    :param keypath: path (on the mon host) to the mon keyring used to auth
    :param keytype: key type understood by keytype_identity/_capabilities
    :param dest_dir: local directory the fetched keyring is written into
    :returns: True when the keyring was written locally, False on failure
    :raises RuntimeError: when the keytype has no known identity/capabilities
    """
    # common ceph CLI prefix: authenticate as mon. with the mon keyring
    args_prefix = [
        '/usr/bin/ceph',
        '--connect-timeout=25',
        '--cluster={cluster}'.format(
            cluster=args.cluster),
        '--name', 'mon.',
        '--keyring={keypath}'.format(
            keypath=keypath),
    ]
    identity = keytype_identity(keytype)
    if identity is None:
        raise RuntimeError('Could not find identity for keytype:%s' % keytype)
    capabilites = keytype_capabilities(keytype)
    if capabilites is None:
        raise RuntimeError('Could not find capabilites for keytype:%s' % keytype)
    # First try getting the key if it already exists, to handle the case where
    # it exists but doesn't match the caps we would pass into get-or-create.
    # This is the same behavior as in newer ceph-create-keys
    out, err, code = remoto.process.check(
        distro.conn,
        args_prefix + ['auth', 'get', identity]
    )
    if code == errno.ENOENT:
        # the key does not exist yet: create it with the default capabilities
        out, err, code = remoto.process.check(
            distro.conn,
            args_prefix + ['auth', 'get-or-create', identity] + capabilites
        )
    if code != 0:
        rlogger.error(
            '"ceph auth get-or-create for keytype %s returned %s',
            keytype, code
        )
        for line in err:
            rlogger.debug(line)
        return False
    keyring_name_local = keytype_path_to(args, keytype)
    keyring_path_local = os.path.join(dest_dir, keyring_name_local)
    # remote output is a list of byte lines; re-join them with newlines
    with open(keyring_path_local, 'wb') as f:
        for line in out:
            f.write(line + b'\n')
    return True
|
Connect to mon and gather keys if mon is in quorum.
|
def gatherkeys_with_mon(args, host, dest_dir):
    """
    Connect to mon and gather keys if mon is in quorum.

    Fetches the mon keyring from ``host``, verifies through the admin
    socket that this mon is part of the quorum, then pulls/creates every
    other keyring type into ``dest_dir``.

    :returns: True only when the mon is in quorum and all keys were
        gathered; False on any failure (logged, not raised)
    """
    distro = hosts.get(host, username=args.username)
    # use the mon's own short hostname to build its on-disk paths
    remote_hostname = distro.conn.remote_module.shortname()
    dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname)
    path_keytype_mon = "%s/keyring" % (dir_keytype_mon)
    mon_key = distro.conn.remote_module.get_file(path_keytype_mon)
    if mon_key is None:
        LOG.warning("No mon key found in host: %s", host)
        return False
    # store the mon keyring locally; it also authenticates the
    # gatherkeys_missing() calls below
    mon_name_local = keytype_path_to(args, "mon")
    mon_path_local = os.path.join(dest_dir, mon_name_local)
    with open(mon_path_local, 'wb') as f:
        f.write(mon_key)
    rlogger = logging.getLogger(host)
    # query mon_status through the admin socket (works even before quorum)
    path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
    out, err, code = remoto.process.check(
        distro.conn,
        [
            "/usr/bin/ceph",
            "--connect-timeout=25",
            "--cluster={cluster}".format(
                cluster=args.cluster),
            "--admin-daemon={asok}".format(
                asok=path_asok),
            "mon_status"
        ]
    )
    if code != 0:
        rlogger.error('"ceph mon_status %s" returned %s', host, code)
        for line in err:
            rlogger.debug(line)
        return False
    try:
        mon_status = json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        rlogger.error('"ceph mon_status %s" output was not json', host)
        for line in out:
            rlogger.error(line)
        return False
    # validate the mon_status payload step by step, bailing out with a
    # specific error message at the first missing piece
    mon_number = None
    mon_map = mon_status.get('monmap')
    if mon_map is None:
        rlogger.error("could not find mon map for mons on '%s'", host)
        return False
    mon_quorum = mon_status.get('quorum')
    if mon_quorum is None:
        rlogger.error("could not find quorum for mons on '%s'" , host)
        return False
    mon_map_mons = mon_map.get('mons')
    if mon_map_mons is None:
        rlogger.error("could not find mons in monmap on '%s'", host)
        return False
    # find this mon's rank so we can check quorum membership
    for mon in mon_map_mons:
        if mon.get('name') == remote_hostname:
            mon_number = mon.get('rank')
            break
    if mon_number is None:
        rlogger.error("could not find '%s' in monmap", remote_hostname)
        return False
    if not mon_number in mon_quorum:
        rlogger.error("Not yet quorum for '%s'", host)
        return False
    for keytype in ["admin", "mds", "mgr", "osd", "rgw"]:
        if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir):
            # We will return failure if we fail to gather any key
            rlogger.error("Failed to return '%s' key from host %s", keytype, host)
            return False
    return True
|
Gather keys from any mon and store in current working directory.
Backs up keys from previous installs and stores new keys.
|
def gatherkeys(args):
    """
    Gather keys from any mon and store in current working directory.

    Backs up keys from previous installs and stores new keys.  Keys are
    first fetched into a temp directory created under a restrictive umask,
    then moved/compared against the local copies; the temp directory is
    always removed and the umask restored.

    :raises RuntimeError: if no mon could be contacted, or any key type
        could not be retrieved
    """
    # restrict permissions on everything we create while gathering secrets
    oldmask = os.umask(0o77)
    try:
        try:
            tmpd = tempfile.mkdtemp()
            LOG.info("Storing keys in temp directory %s", tmpd)
            success = False
            for host in args.mon:
                success = gatherkeys_with_mon(args, host, tmpd)
                if success:
                    break
            if not success:
                LOG.error("Failed to connect to host:%s", ', '.join(args.mon))
                raise RuntimeError('Failed to connect any mon')
            had_error = False
            date_string = time.strftime("%Y%m%d%H%M%S")
            for keytype in ["admin", "mds", "mgr", "mon", "osd", "rgw"]:
                filename = keytype_path_to(args, keytype)
                tmp_path = os.path.join(tmpd, filename)
                if not os.path.exists(tmp_path):
                    # fixed typo in the message ("retrived")
                    LOG.error("No key retrieved for '%s'", keytype)
                    had_error = True
                    continue
                if not os.path.exists(filename):
                    LOG.info("Storing %s", filename)
                    shutil.move(tmp_path, filename)
                    continue
                if _keyring_equivalent(tmp_path, filename):
                    LOG.info("keyring '%s' already exists", filename)
                    continue
                # key changed: keep a dated backup of the old one
                backup_keyring = "%s-%s" % (filename, date_string)
                LOG.info("Replacing '%s' and backing up old key as '%s'",
                         filename, backup_keyring)
                shutil.copy(filename, backup_keyring)
                shutil.move(tmp_path, filename)
            if had_error:
                raise RuntimeError('Failed to get all key types')
        finally:
            LOG.info("Destroy temp directory %s", tmpd)
            shutil.rmtree(tmpd)
    finally:
        os.umask(oldmask)
|
Gather authentication keys for provisioning new nodes.
|
def make(parser):
    """
    Gather authentication keys for provisioning new nodes.
    """
    parser.add_argument(
        'mon',
        metavar='HOST',
        nargs='+',
        help='monitor host to pull keys from',
    )
    parser.set_defaults(func=gatherkeys)
|
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
:params callbacks: A list of callables that accept one argument (the actual
module that contains the connection) that will be
called, in order at the end of the instantiation of the
module.
|
def get(hostname,
        username=None,
        fallback=None,
        detect_sudo=True,
        use_rhceph=False,
        callbacks=None):
    """
    Retrieve the module that matches the distribution of a ``hostname``. This
    function will connect to that host and retrieve the distribution
    information, then return the appropriate module and slap a few attributes
    to that module defining the information it found from the hostname.

    For example, if host ``node1.example.com`` is an Ubuntu server, the
    ``debian`` module would be returned and the following would be set::

        module.name = 'ubuntu'
        module.release = '12.04'
        module.codename = 'precise'

    :param hostname: A hostname that is reachable/resolvable over the network
    :param username: optional remote user to connect as
    :param fallback: Optional fallback to use if no supported distro is found
    :param detect_sudo: whether the connection should probe for sudo
    :param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
                       the community distro. Changes what host module is
                       returned for RHEL.
    :params callbacks: A list of callables that accept one argument (the actual
                       module that contains the connection) that will be
                       called, in order at the end of the instantiation of the
                       module.
    :raises exc.UnsupportedPlatform: when the remote distro is unknown
    :raises RuntimeError: when the remote connection was closed (requiretty)
    """
    conn = get_connection(
        hostname,
        username=username,
        logger=logging.getLogger(hostname),
        detect_sudo=detect_sudo
    )
    try:
        # push the remotes helper module over to the remote interpreter
        conn.import_module(remotes)
    except IOError as error:
        # NOTE(review): any IOError whose message does not contain
        # 'already closed' is silently swallowed here — confirm intended.
        if 'already closed' in getattr(error, 'message', ''):
            raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
    distro_name, release, codename = conn.remote_module.platform_information()
    if not codename or not _get_distro(distro_name):
        raise exc.UnsupportedPlatform(
            distro=distro_name,
            codename=codename,
            release=release)
    machine_type = conn.remote_module.machine_type()
    module = _get_distro(distro_name, use_rhceph=use_rhceph)
    # decorate the distro module with everything learned about the host
    module.name = distro_name
    module.normalized_name = _normalized_distro_name(distro_name)
    module.normalized_release = _normalized_release(release)
    module.distro = module.normalized_name
    module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo']
    module.is_rpm = module.normalized_name in ['redhat', 'centos',
                                               'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo', 'alt']
    module.is_deb = module.normalized_name in ['debian', 'ubuntu']
    module.is_pkgtarxz = module.normalized_name in ['arch']
    module.release = release
    module.codename = codename
    module.conn = conn
    module.machine_type = machine_type
    module.init = module.choose_init(module)
    module.packager = module.get_packager(module)
    # execute each callback if any
    if callbacks:
        for c in callbacks:
            c(module)
    return module
|
A very simple helper, meant to return a connection
that will know about the need to use sudo.
|
def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True):
    """
    Return a remoto connection to ``hostname`` that knows whether it needs
    sudo; any connection failure is re-raised as a RuntimeError.
    """
    if username:
        hostname = "%s@%s" % (username, hostname)
    try:
        conn = remoto.Connection(
            hostname,
            logger=logger,
            threads=threads,
            detect_sudo=detect_sudo,
        )
        # disconnect and move on when nothing comes back for 5 minutes
        conn.global_timeout = 300
        logger.debug("connected to host: %s " % hostname)
        return conn
    except Exception as error:
        raise RuntimeError(
            "connecting to host: %s " % hostname +
            "resulted in errors: %s %s" % (error.__class__.__name__, error)
        )
|
Helper for local connections that are sometimes needed to operate
on local hosts
|
def get_local_connection(logger, use_sudo=False):
    """
    Helper for connections to the local host, which are sometimes needed
    to operate locally.
    """
    # 'localhost' cannot be relied on here; resolve the real hostname
    local_host = socket.gethostname()
    return get_connection(
        local_host,
        None,
        logger=logger,
        threads=1,
        use_sudo=use_sudo,
        detect_sudo=False
    )
|
Ceph MGR daemon management
|
def make(parser):
    """
    Ceph MGR daemon management
    """
    sub = parser.add_subparsers(dest='subcommand')
    sub.required = True

    create = sub.add_parser(
        'create',
        help='Deploy Ceph MGR on remote host(s)'
    )
    create.add_argument(
        'mgr',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )

    parser.set_defaults(func=mgr)
|
Manage packages on remote hosts.
|
def make(parser):
    """
    Manage packages on remote hosts.
    """
    # --install and --remove are mutually exclusive operations
    action = parser.add_mutually_exclusive_group()
    action.add_argument(
        '--install',
        metavar='PKG(s)',
        help='Comma-separated package(s) to install',
    )
    action.add_argument(
        '--remove',
        metavar='PKG(s)',
        help='Comma-separated package(s) to remove',
    )
    parser.add_argument(
        'hosts',
        nargs='+',
    )
    parser.set_defaults(func=pkg)
|
Read the bootstrap-osd key for `cluster`.
|
def get_bootstrap_osd_key(cluster):
    """
    Read the bootstrap-osd key for `cluster` from the current directory.
    """
    keyring = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
    try:
        with open(keyring, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
|
Run on osd node, writes the bootstrap key if not there yet.
|
def create_osd_keyring(conn, cluster, key):
    """
    Run on the osd node: write the bootstrap key if it is not there yet.
    """
    path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
        cluster=cluster,
    )
    if conn.remote_module.path_exists(path):
        return
    conn.logger.warning('osd keyring does not exist yet, creating one')
    conn.remote_module.write_keyring(path, key)
|
Check the status of an OSD. Make sure all are up and in
What good output would look like::
{
"epoch": 8,
"num_osds": 1,
"num_up_osds": 1,
"num_in_osds": "1",
"full": "false",
"nearfull": "false"
}
Note how the booleans are actually strings, so we need to take that into
account and fix it before returning the dictionary. Issue #8108
|
def osd_tree(conn, cluster):
    """
    Run ``ceph osd tree --format=json`` on the remote host and return the
    parsed result as a dict.

    Top-level values that come back as the strings 'true'/'false' are
    converted to real booleans, since --format=json fails to do this
    properly (issue #8108).  Returns an empty dict when the output cannot
    be parsed as JSON.
    """
    ceph_bin = system.executable_path(conn, 'ceph')
    out, err, code = remoto.process.check(
        conn,
        [
            ceph_bin,
            '--cluster={cluster}'.format(cluster=cluster),
            'osd',
            'tree',
            '--format=json',
        ],
    )
    try:
        tree = json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        return {}
    # undo the string-encoded booleans
    for key, value in tree.items():
        if value == 'true':
            tree[key] = True
        elif value == 'false':
            tree[key] = False
    return tree
|
Look for possible issues when checking the status of an OSD and
report them back to the user.
|
def catch_osd_errors(conn, logger, args):
    """
    Inspect the OSD status and warn the user about any OSDs that are
    down, out, full, or near full.
    """
    logger.info('checking OSD status...')
    status = osd_status_check(conn, args.cluster)
    total = int(status.get('num_osds', 0))
    up = int(status.get('num_up_osds', 0))
    in_count = int(status.get('num_in_osds', 0))
    # warn with correct pluralization for both "down" and "out" OSDs
    for present, label in ((up, 'down'), (in_count, 'out')):
        if total > present:
            missing = total - present
            logger.warning('there %s %d OSD%s %s' % (
                'is' if missing == 1 else 'are',
                missing,
                '' if missing == 1 else 's',
                label)
            )
    if status.get('full', False):
        logger.warning('OSDs are full!')
    if status.get('nearfull', False):
        logger.warning('OSDs are near full!')
|
Run on osd node, creates an OSD from a data disk.
|
def create_osd(
        conn,
        cluster,
        data,
        journal,
        zap,
        fs_type,
        dmcrypt,
        dmcrypt_dir,
        storetype,
        block_wal,
        block_db,
        **kw):
    """
    Run ceph-volume on the osd node to create an OSD from a data disk.
    """
    cmd = [
        system.executable_path(conn, 'ceph-volume'),
        '--cluster', cluster,
        'lvm',
        'create',
        '--%s' % storetype,
        '--data', data
    ]
    if zap:
        LOG.warning('zapping is no longer supported when preparing')
    if dmcrypt:
        cmd.append('--dmcrypt')
        # TODO: re-enable dmcrypt support once ceph-volume grows it
        LOG.warning('dmcrypt is currently not supported')

    if storetype == 'bluestore':
        # optional separate devices for the db and wal
        if block_wal:
            cmd.extend(['--block.wal', block_wal])
        if block_db:
            cmd.extend(['--block.db', block_db])
    elif storetype == 'filestore':
        if not journal:
            raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
        cmd.extend(['--journal', journal])

    if kw.get('debug'):
        remoto.process.run(
            conn,
            cmd,
            extend_env={'CEPH_VOLUME_DEBUG': '1'}
        )
    else:
        remoto.process.run(
            conn,
            cmd
        )
|
Prepare a data disk on remote host.
|
def make(parser):
    """
    Prepare a data disk on remote host.
    """
    # shown verbatim as the subcommand's description/help text
    sub_command_help = dedent("""
    Create OSDs from a data disk on a remote host:
        ceph-deploy osd create {node} --data /path/to/device
    For bluestore, optional devices can be used::
        ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
        ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
        ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
    For filestore, the journal must be specified, as well as the objectstore::
        ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
    For data devices, it can be an existing logical volume in the format of:
    vg/lv, or a device. For other OSD components like wal, db, and journal, it
    can be logical volume (in vg/lv format) or it must be a GPT partition.
    """
    )
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.description = sub_command_help

    # two subcommands: 'list' and 'create'
    osd_parser = parser.add_subparsers(dest='subcommand')
    osd_parser.required = True

    osd_list = osd_parser.add_parser(
        'list',
        help='List OSD info from remote host(s)'
    )
    osd_list.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='remote host(s) to list OSDs from'
    )
    osd_list.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )

    osd_create = osd_parser.add_parser(
        'create',
        help='Create new Ceph OSD daemon by preparing and activating a device'
    )
    osd_create.add_argument(
        '--data',
        metavar='DATA',
        help='The OSD data logical volume (vg/lv) or absolute path to device'
    )
    osd_create.add_argument(
        '--journal',
        help='Logical Volume (vg/lv) or path to GPT partition',
    )
    # kept for backwards compatibility; zapping is no longer performed
    osd_create.add_argument(
        '--zap-disk',
        action='store_true',
        help='DEPRECATED - cannot zap when creating an OSD'
    )
    osd_create.add_argument(
        '--fs-type',
        metavar='FS_TYPE',
        choices=['xfs',
                 'btrfs'
                 ],
        default='xfs',
        help='filesystem to use to format DEVICE (xfs, btrfs)',
    )
    osd_create.add_argument(
        '--dmcrypt',
        action='store_true',
        help='use dm-crypt on DEVICE',
    )
    osd_create.add_argument(
        '--dmcrypt-key-dir',
        metavar='KEYDIR',
        default='/etc/ceph/dmcrypt-keys',
        help='directory where dm-crypt keys are stored',
    )
    # objectstore selection: filestore or bluestore (mutually exclusive in use)
    osd_create.add_argument(
        '--filestore',
        action='store_true', default=None,
        help='filestore objectstore',
    )
    osd_create.add_argument(
        '--bluestore',
        action='store_true', default=None,
        help='bluestore objectstore',
    )
    osd_create.add_argument(
        '--block-db',
        default=None,
        help='bluestore block.db path'
    )
    osd_create.add_argument(
        '--block-wal',
        default=None,
        help='bluestore block.wal path'
    )
    osd_create.add_argument(
        'host',
        nargs='?',
        metavar='HOST',
        help='Remote host to connect'
    )
    osd_create.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )
    parser.set_defaults(
        func=osd,
    )
|
Manage disks on a remote host.
|
def make_disk(parser):
    """
    Manage disks on a remote host.
    """
    # two subcommands: 'zap' and 'list'
    disk_parser = parser.add_subparsers(dest='subcommand')
    disk_parser.required = True

    disk_zap = disk_parser.add_parser(
        'zap',
        help='destroy existing data and filesystem on LV or partition',
    )
    disk_zap.add_argument(
        'host',
        nargs='?',
        metavar='HOST',
        help='Remote HOST(s) to connect'
    )
    disk_zap.add_argument(
        'disk',
        nargs='+',
        metavar='DISK',
        help='Disk(s) to zap'
    )
    disk_zap.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )

    disk_list = disk_parser.add_parser(
        'list',
        help='List disk info from remote host(s)'
    )
    disk_list.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='Remote HOST(s) to list OSDs from'
    )
    disk_list.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )
    parser.set_defaults(
        func=disk,
    )
|
Historically everything CentOS, RHEL, and Scientific has been mapped to
`el6` urls, but as we are adding repositories for `rhel`, the URLs should
map correctly to, say, `rhel6` or `rhel7`.
This function looks into the `distro` object and determines the right url
part for the given distro, falling back to `el6` when all else fails.
Specifically to work around the issue of CentOS vs RHEL::
>>> import platform
>>> platform.linux_distribution()
('Red Hat Enterprise Linux Server', '7.0', 'Maipo')
|
def repository_url_part(distro):
    """
    Determine the URL path component for the given distro object.

    RHEL maps to 'rhelN' and the CentOS family (centos, scientific,
    oracle, virtuozzo) to 'elN'; everything else falls back to 'el6'.
    This works around CentOS vs RHEL reporting different platform names
    for otherwise-compatible repositories.
    """
    release = distro.normalized_release
    if release.int_major >= 6:
        name = distro.normalized_name
        if name == 'redhat':
            return 'rhel' + release.major
        if name in ('centos', 'scientific', 'oracle', 'virtuozzo'):
            return 'el' + release.major
    return 'el6'
|
args may need a bunch of logic to set proper defaults that argparse is
not well suited for.
|
def sanitize_args(args):
    """
    Apply defaults that argparse alone cannot express.

    When no release was requested, fall back to 'nautilus' and record that
    the default was used. The deprecated ``--stable`` flag is honored but
    translated into ``--release`` with a warning.
    """
    if args.release is None:
        args.release = 'nautilus'
        args.default_release = True

    # --stable is deprecated; keep accepting it but steer users to --release
    if args.stable is not None:
        LOG.warning('the --stable flag is deprecated, use --release instead')
        args.release = args.stable

    return args
|
Since the package split, now there are various different Ceph components to
install like:
* ceph
* ceph-mon
* ceph-mgr
* ceph-osd
* ceph-mds
This helper function should parse the args that may contain specifics about
these flags and return the default if none are passed in (which is, install
everything)
|
def detect_components(args, distro):
    """
    Since the package split there are several distinct Ceph components that
    can be installed, like:

    * ceph
    * ceph-mon
    * ceph-mgr
    * ceph-osd
    * ceph-mds

    Parse the component flags from ``args`` and return the list of packages
    to install, falling back to the distro's defaults when no specific
    component was requested (which means: install everything).
    """
    # `--repo` means "only lay down repo files, install no packages",
    # which maps to an empty component list.
    if args.repo:
        return []

    flags = {
        'install_osd': 'ceph-osd',
        'install_rgw': 'ceph-radosgw',
        'install_mds': 'ceph-mds',
        'install_mon': 'ceph-mon',
        'install_mgr': 'ceph-mgr',
        'install_common': 'ceph-common',
        'install_tests': 'ceph-test',
    }

    if distro.is_rpm:
        defaults = default_components.rpm
    elif distro.is_pkgtarxz:
        # archlinux ships a single monolithic package: every flag maps to it
        flags = dict.fromkeys(flags, 'ceph')
        defaults = default_components.pkgtarxz
    else:
        defaults = default_components.deb
        # debian names the radosgw package differently than rpm distros
        flags['install_rgw'] = 'radosgw'

    if args.install_all:
        return defaults

    selected = [pkg for flag, pkg in flags.items() if getattr(args, flag, False)]
    # no explicit flags (and no --repo) means install the full default set
    return selected or defaults
|
A boolean to determine the logic needed to proceed with a custom repo
installation instead of cramming everything next to the logic operator.
|
def should_use_custom_repo(args, cd_conf, repo_url):
    """
    Decide whether a custom repo installation should proceed, keeping the
    branching readable instead of cramming everything next to the logic
    operator.
    """
    # an explicit repo URL on the CLI always overrides custom repos
    if repo_url:
        return False
    if not cd_conf or not cd_conf.has_repos:
        return False
    release_known = args.release in cd_conf.get_repos()
    default_repo = cd_conf.get_default_repo()
    return bool(release_known or default_repo)
|
A custom repo install helper that will go through config checks to retrieve
repos (and any extra repos defined) and install those
``cd_conf`` is the object built from argparse that holds the flags and
information needed to determine what metadata from the configuration to be
used.
|
def custom_repo(distro, args, cd_conf, rlogger, install_ceph=None):
    """
    A custom repo install helper that will go through config checks to
    retrieve repos (and any extra repos defined) and install those.

    ``cd_conf`` is the object built from argparse that holds the flags and
    information needed to determine what metadata from the configuration
    should be used.
    """
    components = detect_components(args, distro)

    def _install_section(section, options):
        # baseurl/gpgkey are mandatory; popping a missing key raises KeyError
        try:
            distro.repo_install(
                distro,
                section,
                options.pop('baseurl'),
                options.pop('gpgkey'),
                components=components,
                **options
            )
        except KeyError as err:
            raise RuntimeError('missing required key: %s in config section: %s' % (err, section))

    default_repo = cd_conf.get_default_repo()
    if args.release in cd_conf.get_repos():
        LOG.info('will use repository from conf: %s' % args.release)
        default_repo = args.release
    elif default_repo:
        LOG.info('will use default repository: %s' % default_repo)

    # At this point we know there is a cd_conf and that it has custom
    # repos; make sure we were able to detect an actual repo
    if not default_repo:
        LOG.warning('a ceph-deploy config was found with repos \
            but could not default to one')
        return

    options = dict(cd_conf.items(default_repo))
    # install_ceph=False means "repo files only, install no packages"
    options['install_ceph'] = False if install_ceph is False else True
    extra_repos = cd_conf.get_list(default_repo, 'extra-repos')
    rlogger.info('adding custom repository file')
    _install_section(default_repo, options)

    for xrepo in extra_repos:
        rlogger.info('adding extra repo file: %s.repo' % xrepo)
        _install_section(xrepo, dict(cd_conf.items(xrepo)))
|
For a user that only wants to install the repository only (and avoid
installing Ceph and its dependencies).
|
def install_repo(args):
    """
    For a user that only wants to install the repository (and avoid
    installing Ceph and its dependencies).
    """
    cd_conf = getattr(args, 'cd_conf', None)

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        # XXX use_rhceph should get removed once Ceph packages are split for
        # upstream. If default_release is True, the user is installing on a
        # RHEL machine and should expect to get RHEL packages. Otherwise a
        # specific version, repo, or development branch must be given. Other
        # distro users should not see any differences.
        distro = hosts.get(
            hostname,
            username=args.username,
            use_rhceph=args.default_release,
        )
        rlogger = logging.getLogger(hostname)
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )
        custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
|
Install Ceph packages on remote hosts.
|
def make(parser):
    """
    Install Ceph packages on remote hosts.
    """
    # the release-selection flags are mutually exclusive: at most one of
    # --stable/--release/--testing/--dev picks the package source
    version = parser.add_mutually_exclusive_group()
    # XXX deprecated in favor of release
    version.add_argument(
        '--stable',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        help='[DEPRECATED] install a release known as CODENAME\
        (done by default) (default: %(default)s)',
    )
    version.add_argument(
        '--release',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        help='install a release known as CODENAME\
        (done by default) (default: %(default)s)',
    )
    version.add_argument(
        '--testing',
        nargs=0,
        action=StoreVersion,
        help='install the latest development release',
    )
    version.add_argument(
        '--dev',
        nargs='?',
        action=StoreVersion,
        const='master',
        metavar='BRANCH_OR_TAG',
        help='install a bleeding edge build from Git branch\
        or tag (default: %(default)s)',
    )
    # --dev-commit is intentionally on the parser, not the exclusive group,
    # so it can be combined with --dev
    parser.add_argument(
        '--dev-commit',
        nargs='?',
        action=StoreVersion,
        metavar='COMMIT',
        help='install a bleeding edge build from Git commit (defaults to master branch)',
    )
    version.set_defaults(
        stable=None,  # XXX deprecated in favor of release
        release=None,  # Set the default release in sanitize_args()
        dev='master',
        version_kind='stable',
    )
    # per-component selection flags; when none are given, all components
    # are installed (see detect_components)
    parser.add_argument(
        '--mon',
        dest='install_mon',
        action='store_true',
        help='install the mon component only',
    )
    parser.add_argument(
        '--mgr',
        dest='install_mgr',
        action='store_true',
        help='install the mgr component only',
    )
    parser.add_argument(
        '--mds',
        dest='install_mds',
        action='store_true',
        help='install the mds component only',
    )
    parser.add_argument(
        '--rgw',
        dest='install_rgw',
        action='store_true',
        help='install the rgw component only',
    )
    parser.add_argument(
        '--osd',
        dest='install_osd',
        action='store_true',
        help='install the osd component only',
    )
    parser.add_argument(
        '--tests',
        dest='install_tests',
        action='store_true',
        help='install the testing components',
    )
    parser.add_argument(
        '--cli', '--common',
        dest='install_common',
        action='store_true',
        help='install the common component only',
    )
    parser.add_argument(
        '--all',
        dest='install_all',
        action='store_true',
        help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default',
    )
    # repo handling: adjusting repos and only-install-repo are exclusive
    repo = parser.add_mutually_exclusive_group()
    repo.add_argument(
        '--adjust-repos',
        dest='adjust_repos',
        action='store_true',
        help='install packages modifying source repos',
    )
    repo.add_argument(
        '--no-adjust-repos',
        dest='adjust_repos',
        action='store_false',
        help='install packages without modifying source repos',
    )
    repo.add_argument(
        '--repo',
        action='store_true',
        help='install repo files only (skips package installation)',
    )
    repo.set_defaults(
        adjust_repos=True,
    )
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to install on',
    )
    parser.add_argument(
        '--local-mirror',
        nargs='?',
        const='PATH',
        default=None,
        help='Fetch packages and push them to hosts for a local repo mirror',
    )
    parser.add_argument(
        '--repo-url',
        nargs='?',
        dest='repo_url',
        help='specify a repo URL that mirrors/contains Ceph packages',
    )
    parser.add_argument(
        '--gpg-url',
        nargs='?',
        dest='gpg_url',
        help='specify a GPG key URL to be used with custom repos\
        (defaults to ceph.com)'
    )
    parser.add_argument(
        '--nogpgcheck',
        action='store_true',
        help='install packages without gpgcheck',
    )
    parser.set_defaults(
        func=install,
    )
|
Remove Ceph packages from remote hosts.
|
def make_uninstall(parser):
    """
    Remove Ceph packages from remote hosts.
    """
    # one or more target hosts are required
    parser.add_argument(
        'host', metavar='HOST', nargs='+',
        help='hosts to uninstall Ceph from',
    )
    parser.set_defaults(func=uninstall)
|
Remove Ceph packages from remote hosts and purge all data.
|
def make_purge(parser):
    """
    Remove Ceph packages from remote hosts and purge all data.
    """
    # one or more target hosts are required
    parser.add_argument(
        'host', metavar='HOST', nargs='+',
        help='hosts to purge Ceph from',
    )
    parser.set_defaults(func=purge)
|
Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
|
def make_purge_data(parser):
    """
    Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
    """
    # one or more target hosts are required
    parser.add_argument(
        'host', metavar='HOST', nargs='+',
        help='hosts to purge Ceph data from',
    )
    parser.set_defaults(func=purgedata)
|
Iterate through list of MON hosts, return tuples of (name, host).
|
def mon_hosts(mons):
    """
    Iterate through a list of MON hosts, yielding (name, host) tuples.

    Entries may be given as ``NAME:HOST``; a bare entry is used for both.
    """
    for entry in mons:
        if ':' in entry:
            name, host = entry.split(':')
        else:
            name = host = entry
        # keep only the short hostname (strip any domain suffix) as the name
        name = name.split('.')[0]
        yield (name, host)
|
Ceph RGW daemon management
|
def make(parser):
    """
    Ceph RGW daemon management
    """
    # every rgw invocation requires a subcommand (currently only 'create')
    rgw_parser = parser.add_subparsers(dest='subcommand')
    rgw_parser.required = True

    rgw_create = rgw_parser.add_parser(
        'create',
        help='Create an RGW instance'
    )
    # targets are HOST or HOST:NAME pairs, parsed by colon_separated
    rgw_create.add_argument(
        'rgw',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on. \
        NAME is automatically prefixed with \'rgw.\'',
    )
    parser.set_defaults(
        func=rgw,
    )
|
Ensure that current host can SSH remotely to the remote
host using the ``BatchMode`` option to prevent a password prompt.
That attempt will error with an exit status of 255 and a ``Permission
denied`` message or a ``Host key verification failed`` message.
|
def can_connect_passwordless(hostname):
    """
    Ensure that the current host can SSH to the remote host using the
    ``BatchMode`` option to prevent a password prompt. A failed attempt
    errors with exit status 255 and either a ``Permission denied`` or
    a ``Host key verification failed`` message.
    """
    # local connections never go through SSH, so there is nothing to verify
    if not remoto.backends.needs_ssh(hostname):
        return True
    logger = logging.getLogger(hostname)
    with get_local_connection(logger) as conn:
        # attempt a login with password prompts disabled
        command = ['ssh', '-CT', '-o', 'BatchMode=yes', hostname, 'true']
        out, err, retval = remoto.process.check(conn, command, stop_on_error=False)
        key_error_markers = (
            'Permission denied ',
            'Host key verification failed.',
        )
        has_key_error = any(
            marker in line
            for line in err
            for marker in key_error_markers
        )
        if retval == 255 and has_key_error:
            return False
    return True
|
Select a init system
Returns the name of a init system (upstart, sysvinit ...).
|
def choose_init(module):
    """
    Select an init system.

    Returns the name of an init system (upstart, sysvinit ...).
    """
    # Upstart must be probed first: installing ceph may create
    # /lib/systemd/system/ceph.target, which would fool the systemd probe.
    if is_upstart(module.conn):
        return 'upstart'
    systemd_target = "/lib/systemd/system/ceph.target"
    if is_systemd(module.conn) or module.conn.remote_module.path_exists(systemd_target):
        return 'systemd'
    return 'sysvinit'
|
Example usage::
>>> from ceph_deploy.util.paths import mon
>>> mon.keyring('mycluster', 'myhostname')
/var/lib/ceph/tmp/mycluster-myhostname.mon.keyring
|
def keyring(cluster, hostname):
    """
    Build the temporary mon keyring path for a cluster/host pair.

    Example usage::

        >>> from ceph_deploy.util.paths import mon
        >>> mon.keyring('mycluster', 'myhostname')
        /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring
    """
    return join(constants.tmp_path, '%s-%s.mon.keyring' % (cluster, hostname))
|
Example usage::
>>> from ceph_deploy.util.paths import mon
>>> mon.asok('mycluster', 'myhostname')
/var/run/ceph/mycluster-mon.myhostname.asok
|
def asok(cluster, hostname):
    """
    Build the mon admin socket path for a cluster/host pair.

    Example usage::

        >>> from ceph_deploy.util.paths import mon
        >>> mon.asok('mycluster', 'myhostname')
        /var/run/ceph/mycluster-mon.myhostname.asok
    """
    return join(constants.base_run_path, '%s-mon.%s.asok' % (cluster, hostname))
|
Example usage::
>>> from ceph_deploy.util.paths import mon
>>> mon.monmap('mycluster', 'myhostname')
/var/lib/ceph/tmp/mycluster.myhostname.monmap
|
def monmap(cluster, hostname):
    """
    Build the temporary monmap path for a cluster/host pair.

    Example usage::

        >>> from ceph_deploy.util.paths import mon
        >>> mon.monmap('mycluster', 'myhostname')
        /var/lib/ceph/tmp/mycluster.myhostname.monmap
    """
    # (a stray no-op `monmap` expression statement was removed here; it
    # only evaluated a reference to this function and discarded it)
    mon_map_file = '%s.%s.monmap' % (cluster, hostname)
    return join(constants.tmp_path, mon_map_file)
|
Search result of getaddrinfo() for a non-localhost-net address
|
def get_nonlocal_ip(host, subnet=None):
    """
    Search the result of getaddrinfo() for a non-localhost-net address.
    """
    try:
        addr_info = socket.getaddrinfo(host, None)
    except socket.gaierror:
        raise exc.UnableToResolveError(host)
    for info in addr_info:
        # each getaddrinfo entry is a 5-tuple; the last element is (ip, port)
        ip = info[4][0]
        if subnet and ip_in_subnet(ip, subnet):
            LOG.info('found ip (%s) for host (%s) to be in cluster subnet (%s)' % (ip, host, subnet,))
            return ip
        if not ip.startswith('127.'):
            # first non-loopback address wins even if it missed the subnet
            if subnet:
                LOG.warning('could not match ip (%s) for host (%s) for cluster subnet (%s)' % (ip, host, subnet,))
            return ip
    raise exc.UnableToResolveError(host)
|
Does IP exists in a given subnet utility. Returns a boolean
|
def ip_in_subnet(ip, subnet):
    """Does IP exist in a given subnet? Returns a boolean."""
    def _to_int(dotted):
        # fold dotted-quad octets into a single 32-bit integer
        value = 0
        for octet in dotted.split('.'):
            value = (value << 8) | int(octet)
        return value

    network, prefix = subnet.split('/')
    mask = (0xffffffff << (32 - int(prefix))) & 0xffffffff
    return (_to_int(ip) & mask) == (_to_int(network) & mask)
|
Returns True if host is within specified subnet, otherwise False
|
def in_subnet(cidr, addrs=None):
    """
    Return True if any of ``addrs`` falls within the ``cidr`` subnet.

    Args:
        cidr: subnet in ``a.b.c.d/bits`` notation
        addrs: iterable of dotted-quad addresses; ``None`` is treated as
            empty (previously iterating ``None`` raised TypeError)

    Returns:
        bool: True when at least one address is inside the subnet.
    """
    # guard against the None default: no addresses means no match
    for address in (addrs or []):
        if ip_in_subnet(address, cidr):
            return True
    return False
|
Returns a list of IPv4/IPv6 addresses assigned to the host. 127.0.0.1/::1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
Example output looks like::
>>> ip_addresses(conn)
>>> ['192.168.1.111', '10.0.1.12', '2001:db8::100']
|
def ip_addresses(conn, interface=None, include_loopback=False):
    """
    Return a sorted list of IPv4/IPv6 addresses assigned to the host.
    127.0.0.1/::1 are ignored unless ``include_loopback=True``. If
    ``interface`` is provided, only that interface's addresses are
    returned.

    Example output looks like::

        >>> ip_addresses(conn)
        >>> ['192.168.1.111', '10.0.1.12', '2001:db8::100']
    """
    found = set()
    ifaces = linux_interfaces(conn)
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = {name: data for name, data in ifaces.items()
                         if name == interface}
    if not target_ifaces:
        LOG.error('Interface {0} not found.'.format(interface))

    for info in target_ifaces.values():
        for ipv4 in info.get('inet', []):
            is_loopback = (in_subnet('127.0.0.0/8', [ipv4.get('address')])
                           or ipv4.get('label') == 'lo')
            if include_loopback or not is_loopback:
                found.add(ipv4['address'])
        for secondary in info.get('secondary', []):
            addr = secondary.get('address')
            if addr and secondary.get('type') == 'inet':
                if include_loopback or not in_subnet('127.0.0.0/8', [addr]):
                    found.add(addr)
        for ipv6 in info.get('inet6', []):
            addr = ipv6.get('address')
            # When switching to Python 3 the IPAddress module can do all this work for us
            if addr.startswith('fe80::'):
                # link-local addresses are never useful here
                continue
            if addr == '::1' and not include_loopback:
                continue
            found.add(ipv6['address'])

    if found:
        conn.logger.debug('IP addresses found: %s' % str(list(found)))
    return sorted(list(found))
|
Obtain interface information for *NIX/BSD variants in remote servers.
Example output from a remote node with a couple of interfaces::
{'eth0': {'hwaddr': '08:00:27:08:c2:e4',
'inet': [{'address': '10.0.2.15',
'broadcast': '10.0.2.255',
'label': 'eth0',
'netmask': '255.255.255.0'}],
'inet6': [{'address': 'fe80::a00:27ff:fe08:c2e4',
'prefixlen': '64'}],
'up': True},
'eth1': {'hwaddr': '08:00:27:70:06:f1',
'inet': [{'address': '192.168.111.101',
'broadcast': '192.168.111.255',
'label': 'eth1',
'netmask': '255.255.255.0'}],
'inet6': [{'address': 'fe80::a00:27ff:fe70:6f1',
'prefixlen': '64'}],
'up': True},
'lo': {'hwaddr': '00:00:00:00:00:00',
'inet': [{'address': '127.0.0.1',
'broadcast': None,
'label': 'lo',
'netmask': '255.0.0.0'}],
'inet6': [{'address': '::1', 'prefixlen': '128'}],
'up': True}}
:param conn: A connection object to a remote node
|
def linux_interfaces(conn):
    """
    Obtain interface information for *NIX/BSD variants in remote servers.

    Example output from a remote node with a couple of interfaces::

        {'eth0': {'hwaddr': '08:00:27:08:c2:e4',
                  'inet': [{'address': '10.0.2.15',
                            'broadcast': '10.0.2.255',
                            'label': 'eth0',
                            'netmask': '255.255.255.0'}],
                  'inet6': [{'address': 'fe80::a00:27ff:fe08:c2e4',
                             'prefixlen': '64'}],
                  'up': True},
         'eth1': {'hwaddr': '08:00:27:70:06:f1',
                  'inet': [{'address': '192.168.111.101',
                            'broadcast': '192.168.111.255',
                            'label': 'eth1',
                            'netmask': '255.255.255.0'}],
                  'inet6': [{'address': 'fe80::a00:27ff:fe70:6f1',
                             'prefixlen': '64'}],
                  'up': True},
         'lo': {'hwaddr': '00:00:00:00:00:00',
                'inet': [{'address': '127.0.0.1',
                          'broadcast': None,
                          'label': 'lo',
                          'netmask': '255.0.0.0'}],
                'inet6': [{'address': '::1', 'prefixlen': '128'}],
                'up': True}}

    :param conn: A connection object to a remote node
    """
    ifaces = dict()
    # prefer `ip`; fall back to `ifconfig` only when `ip` is unavailable
    ip_path = conn.remote_module.which('ip')
    ifconfig_path = None if ip_path else conn.remote_module.which('ifconfig')
    if ip_path:
        cmd1, _, _ = remoto.process.check(
            conn,
            [
                '{0}'.format(ip_path),
                'link',
                'show',
            ],
        )
        cmd2, _, _ = remoto.process.check(
            conn,
            [
                '{0}'.format(ip_path),
                'addr',
                'show',
            ],
        )
        # output lines are bytes here: join as bytes, then decode once
        ifaces = _interfaces_ip(b'\n'.join(cmd1).decode('utf-8') + '\n' +
                                b'\n'.join(cmd2).decode('utf-8'))
    elif ifconfig_path:
        cmd, _, _ = remoto.process.check(
            conn,
            [
                '{0}'.format(ifconfig_path),
                '-a',
            ]
        )
        # NOTE(review): unlike the `ip` branch, this joins without decoding —
        # presumably `cmd` is already str on this path; verify on Python 3
        # (bytes elements would make this '\n'.join() raise TypeError).
        ifaces = _interfaces_ifconfig('\n'.join(cmd))
    return ifaces
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.