id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
239,700 | RedHatInsights/insights-core | insights/core/dr.py | ComponentType.get_missing_dependencies | def get_missing_dependencies(self, broker):
"""
Gets required and at-least-one dependencies not provided by the broker.
"""
missing_required = [r for r in self.requires if r not in broker]
missing_at_least_one = [d for d in self.at_least_one if not set(d).intersection(broker)]
if missing_required or missing_at_least_one:
return (missing_required, missing_at_least_one) | python | def get_missing_dependencies(self, broker):
missing_required = [r for r in self.requires if r not in broker]
missing_at_least_one = [d for d in self.at_least_one if not set(d).intersection(broker)]
if missing_required or missing_at_least_one:
return (missing_required, missing_at_least_one) | [
"def",
"get_missing_dependencies",
"(",
"self",
",",
"broker",
")",
":",
"missing_required",
"=",
"[",
"r",
"for",
"r",
"in",
"self",
".",
"requires",
"if",
"r",
"not",
"in",
"broker",
"]",
"missing_at_least_one",
"=",
"[",
"d",
"for",
"d",
"in",
"self",... | Gets required and at-least-one dependencies not provided by the broker. | [
"Gets",
"required",
"and",
"at",
"-",
"least",
"-",
"one",
"dependencies",
"not",
"provided",
"by",
"the",
"broker",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L655-L662 |
239,701 | RedHatInsights/insights-core | insights/core/dr.py | Broker.observer | def observer(self, component_type=ComponentType):
"""
You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`.
"""
def inner(func):
self.add_observer(func, component_type)
return func
return inner | python | def observer(self, component_type=ComponentType):
def inner(func):
self.add_observer(func, component_type)
return func
return inner | [
"def",
"observer",
"(",
"self",
",",
"component_type",
"=",
"ComponentType",
")",
":",
"def",
"inner",
"(",
"func",
")",
":",
"self",
".",
"add_observer",
"(",
"func",
",",
"component_type",
")",
"return",
"func",
"return",
"inner"
] | You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`. | [
"You",
"can",
"use"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L735-L743 |
239,702 | RedHatInsights/insights-core | insights/core/dr.py | Broker.add_observer | def add_observer(self, o, component_type=ComponentType):
"""
Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass
"""
self.observers[component_type].add(o) | python | def add_observer(self, o, component_type=ComponentType):
self.observers[component_type].add(o) | [
"def",
"add_observer",
"(",
"self",
",",
"o",
",",
"component_type",
"=",
"ComponentType",
")",
":",
"self",
".",
"observers",
"[",
"component_type",
"]",
".",
"add",
"(",
"o",
")"
] | Add a callback that will get invoked after each component is called.
Args:
o (func): the callback function
Keyword Args:
component_type (ComponentType): the :class:`ComponentType` to observe.
The callback will fire any time an instance of the class or its
subclasses is invoked.
The callback should look like this:
.. code-block:: python
def callback(comp, broker):
value = broker.get(comp)
# do something with value
pass | [
"Add",
"a",
"callback",
"that",
"will",
"get",
"invoked",
"after",
"each",
"component",
"is",
"called",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/dr.py#L745-L767 |
239,703 | RedHatInsights/insights-core | insights/combiners/logrotate_conf.py | get_tree | def get_tree(root=None):
"""
This is a helper function to get a logrotate configuration component for
your local machine or an archive. It's for use in interactive sessions.
"""
from insights import run
return run(LogRotateConfTree, root=root).get(LogRotateConfTree) | python | def get_tree(root=None):
from insights import run
return run(LogRotateConfTree, root=root).get(LogRotateConfTree) | [
"def",
"get_tree",
"(",
"root",
"=",
"None",
")",
":",
"from",
"insights",
"import",
"run",
"return",
"run",
"(",
"LogRotateConfTree",
",",
"root",
"=",
"root",
")",
".",
"get",
"(",
"LogRotateConfTree",
")"
] | This is a helper function to get a logrotate configuration component for
your local machine or an archive. It's for use in interactive sessions. | [
"This",
"is",
"a",
"helper",
"function",
"to",
"get",
"a",
"logrotate",
"configuration",
"component",
"for",
"your",
"local",
"machine",
"or",
"an",
"archive",
".",
"It",
"s",
"for",
"use",
"in",
"interactive",
"sessions",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/combiners/logrotate_conf.py#L231-L237 |
239,704 | RedHatInsights/insights-core | insights/formats/_syslog.py | SysLogFormat.logit | def logit(self, msg, pid, user, cname, priority=None):
"""Function for formatting content and logging to syslog"""
if self.stream:
print(msg, file=self.stream)
elif priority == logging.WARNING:
self.logger.warning("{0}[pid:{1}] user:{2}: WARNING - {3}".format(cname, pid, user, msg))
elif priority == logging.ERROR:
self.logger.error("{0}[pid:{1}] user:{2}: ERROR - {3}".format(cname, pid, user, msg))
else:
self.logger.info("{0}[pid:{1}] user:{2}: INFO - {3}".format(cname, pid, user, msg)) | python | def logit(self, msg, pid, user, cname, priority=None):
if self.stream:
print(msg, file=self.stream)
elif priority == logging.WARNING:
self.logger.warning("{0}[pid:{1}] user:{2}: WARNING - {3}".format(cname, pid, user, msg))
elif priority == logging.ERROR:
self.logger.error("{0}[pid:{1}] user:{2}: ERROR - {3}".format(cname, pid, user, msg))
else:
self.logger.info("{0}[pid:{1}] user:{2}: INFO - {3}".format(cname, pid, user, msg)) | [
"def",
"logit",
"(",
"self",
",",
"msg",
",",
"pid",
",",
"user",
",",
"cname",
",",
"priority",
"=",
"None",
")",
":",
"if",
"self",
".",
"stream",
":",
"print",
"(",
"msg",
",",
"file",
"=",
"self",
".",
"stream",
")",
"elif",
"priority",
"==",... | Function for formatting content and logging to syslog | [
"Function",
"for",
"formatting",
"content",
"and",
"logging",
"to",
"syslog"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/_syslog.py#L45-L55 |
239,705 | RedHatInsights/insights-core | insights/formats/_syslog.py | SysLogFormat.log_exceptions | def log_exceptions(self, c, broker):
"""Gets exceptions to be logged and sends to logit function to be logged to syslog"""
if c in broker.exceptions:
ex = broker.exceptions.get(c)
ex = "Exception in {0} - {1}".format(dr.get_name(c), str(ex))
self.logit(ex, self.pid, self.user, "insights-run", logging.ERROR) | python | def log_exceptions(self, c, broker):
if c in broker.exceptions:
ex = broker.exceptions.get(c)
ex = "Exception in {0} - {1}".format(dr.get_name(c), str(ex))
self.logit(ex, self.pid, self.user, "insights-run", logging.ERROR) | [
"def",
"log_exceptions",
"(",
"self",
",",
"c",
",",
"broker",
")",
":",
"if",
"c",
"in",
"broker",
".",
"exceptions",
":",
"ex",
"=",
"broker",
".",
"exceptions",
".",
"get",
"(",
"c",
")",
"ex",
"=",
"\"Exception in {0} - {1}\"",
".",
"format",
"(",
... | Gets exceptions to be logged and sends to logit function to be logged to syslog | [
"Gets",
"exceptions",
"to",
"be",
"logged",
"and",
"sends",
"to",
"logit",
"function",
"to",
"be",
"logged",
"to",
"syslog"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/_syslog.py#L57-L63 |
239,706 | RedHatInsights/insights-core | insights/formats/_syslog.py | SysLogFormat.log_rule_info | def log_rule_info(self):
"""Collects rule information and send to logit function to log to syslog"""
for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
v = self.broker[c]
_type = v.get("type")
if _type:
if _type != "skip":
msg = "Running {0} ".format(dr.get_name(c))
self.logit(msg, self.pid, self.user, "insights-run", logging.INFO)
else:
msg = "Rule skipped {0} ".format(dr.get_name(c))
self.logit(msg, self.pid, self.user, "insights-run", logging.WARNING) | python | def log_rule_info(self):
for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
v = self.broker[c]
_type = v.get("type")
if _type:
if _type != "skip":
msg = "Running {0} ".format(dr.get_name(c))
self.logit(msg, self.pid, self.user, "insights-run", logging.INFO)
else:
msg = "Rule skipped {0} ".format(dr.get_name(c))
self.logit(msg, self.pid, self.user, "insights-run", logging.WARNING) | [
"def",
"log_rule_info",
"(",
"self",
")",
":",
"for",
"c",
"in",
"sorted",
"(",
"self",
".",
"broker",
".",
"get_by_type",
"(",
"rule",
")",
",",
"key",
"=",
"dr",
".",
"get_name",
")",
":",
"v",
"=",
"self",
".",
"broker",
"[",
"c",
"]",
"_type"... | Collects rule information and send to logit function to log to syslog | [
"Collects",
"rule",
"information",
"and",
"send",
"to",
"logit",
"function",
"to",
"log",
"to",
"syslog"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/_syslog.py#L72-L84 |
239,707 | RedHatInsights/insights-core | insights/client/data_collector.py | DataCollector._run_pre_command | def _run_pre_command(self, pre_cmd):
'''
Run a pre command to get external args for a command
'''
logger.debug('Executing pre-command: %s', pre_cmd)
try:
pre_proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
except OSError as err:
if err.errno == errno.ENOENT:
logger.debug('Command %s not found', pre_cmd)
return
stdout, stderr = pre_proc.communicate()
the_return_code = pre_proc.poll()
logger.debug("Pre-command results:")
logger.debug("STDOUT: %s", stdout)
logger.debug("STDERR: %s", stderr)
logger.debug("Return Code: %s", the_return_code)
if the_return_code != 0:
return []
if six.PY3:
stdout = stdout.decode('utf-8')
return stdout.splitlines() | python | def _run_pre_command(self, pre_cmd):
'''
Run a pre command to get external args for a command
'''
logger.debug('Executing pre-command: %s', pre_cmd)
try:
pre_proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
except OSError as err:
if err.errno == errno.ENOENT:
logger.debug('Command %s not found', pre_cmd)
return
stdout, stderr = pre_proc.communicate()
the_return_code = pre_proc.poll()
logger.debug("Pre-command results:")
logger.debug("STDOUT: %s", stdout)
logger.debug("STDERR: %s", stderr)
logger.debug("Return Code: %s", the_return_code)
if the_return_code != 0:
return []
if six.PY3:
stdout = stdout.decode('utf-8')
return stdout.splitlines() | [
"def",
"_run_pre_command",
"(",
"self",
",",
"pre_cmd",
")",
":",
"logger",
".",
"debug",
"(",
"'Executing pre-command: %s'",
",",
"pre_cmd",
")",
"try",
":",
"pre_proc",
"=",
"Popen",
"(",
"pre_cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"STDOUT... | Run a pre command to get external args for a command | [
"Run",
"a",
"pre",
"command",
"to",
"get",
"external",
"args",
"for",
"a",
"command"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/data_collector.py#L51-L72 |
239,708 | RedHatInsights/insights-core | insights/client/data_collector.py | DataCollector._parse_file_spec | def _parse_file_spec(self, spec):
'''
Separate wildcard specs into more specs
'''
# separate wildcard specs into more specs
if '*' in spec['file']:
expanded_paths = _expand_paths(spec['file'])
if not expanded_paths:
return []
expanded_specs = []
for p in expanded_paths:
_spec = copy.copy(spec)
_spec['file'] = p
expanded_specs.append(_spec)
return expanded_specs
else:
return [spec] | python | def _parse_file_spec(self, spec):
'''
Separate wildcard specs into more specs
'''
# separate wildcard specs into more specs
if '*' in spec['file']:
expanded_paths = _expand_paths(spec['file'])
if not expanded_paths:
return []
expanded_specs = []
for p in expanded_paths:
_spec = copy.copy(spec)
_spec['file'] = p
expanded_specs.append(_spec)
return expanded_specs
else:
return [spec] | [
"def",
"_parse_file_spec",
"(",
"self",
",",
"spec",
")",
":",
"# separate wildcard specs into more specs",
"if",
"'*'",
"in",
"spec",
"[",
"'file'",
"]",
":",
"expanded_paths",
"=",
"_expand_paths",
"(",
"spec",
"[",
"'file'",
"]",
")",
"if",
"not",
"expanded... | Separate wildcard specs into more specs | [
"Separate",
"wildcard",
"specs",
"into",
"more",
"specs"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/data_collector.py#L74-L91 |
239,709 | RedHatInsights/insights-core | insights/client/data_collector.py | DataCollector._parse_glob_spec | def _parse_glob_spec(self, spec):
'''
Grab globs of things
'''
some_globs = glob.glob(spec['glob'])
if not some_globs:
return []
el_globs = []
for g in some_globs:
_spec = copy.copy(spec)
_spec['file'] = g
el_globs.append(_spec)
return el_globs | python | def _parse_glob_spec(self, spec):
'''
Grab globs of things
'''
some_globs = glob.glob(spec['glob'])
if not some_globs:
return []
el_globs = []
for g in some_globs:
_spec = copy.copy(spec)
_spec['file'] = g
el_globs.append(_spec)
return el_globs | [
"def",
"_parse_glob_spec",
"(",
"self",
",",
"spec",
")",
":",
"some_globs",
"=",
"glob",
".",
"glob",
"(",
"spec",
"[",
"'glob'",
"]",
")",
"if",
"not",
"some_globs",
":",
"return",
"[",
"]",
"el_globs",
"=",
"[",
"]",
"for",
"g",
"in",
"some_globs"... | Grab globs of things | [
"Grab",
"globs",
"of",
"things"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/data_collector.py#L93-L105 |
239,710 | RedHatInsights/insights-core | insights/client/data_collector.py | DataCollector.run_collection | def run_collection(self, conf, rm_conf, branch_info):
'''
Run specs and collect all the data
'''
if rm_conf is None:
rm_conf = {}
logger.debug('Beginning to run collection spec...')
exclude = None
if rm_conf:
try:
exclude = rm_conf['patterns']
logger.warn("WARNING: Skipping patterns found in remove.conf")
except LookupError:
logger.debug('Patterns section of remove.conf is empty.')
for c in conf['commands']:
# remember hostname archive path
if c.get('symbolic_name') == 'hostname':
self.hostname_path = os.path.join(
'insights_commands', mangle.mangle_command(c['command']))
rm_commands = rm_conf.get('commands', [])
if c['command'] in rm_commands or c.get('symbolic_name') in rm_commands:
logger.warn("WARNING: Skipping command %s", c['command'])
elif self.mountpoint == "/" or c.get("image"):
cmd_specs = self._parse_command_spec(c, conf['pre_commands'])
for s in cmd_specs:
cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint)
self.archive.add_to_archive(cmd_spec)
for f in conf['files']:
rm_files = rm_conf.get('files', [])
if f['file'] in rm_files or f.get('symbolic_name') in rm_files:
logger.warn("WARNING: Skipping file %s", f['file'])
else:
file_specs = self._parse_file_spec(f)
for s in file_specs:
# filter files post-wildcard parsing
if s['file'] in rm_conf.get('files', []):
logger.warn("WARNING: Skipping file %s", s['file'])
else:
file_spec = InsightsFile(s, exclude, self.mountpoint)
self.archive.add_to_archive(file_spec)
if 'globs' in conf:
for g in conf['globs']:
glob_specs = self._parse_glob_spec(g)
for g in glob_specs:
if g['file'] in rm_conf.get('files', []):
logger.warn("WARNING: Skipping file %s", g)
else:
glob_spec = InsightsFile(g, exclude, self.mountpoint)
self.archive.add_to_archive(glob_spec)
logger.debug('Spec collection finished.')
# collect metadata
logger.debug('Collecting metadata...')
self._write_branch_info(branch_info)
logger.debug('Metadata collection finished.') | python | def run_collection(self, conf, rm_conf, branch_info):
'''
Run specs and collect all the data
'''
if rm_conf is None:
rm_conf = {}
logger.debug('Beginning to run collection spec...')
exclude = None
if rm_conf:
try:
exclude = rm_conf['patterns']
logger.warn("WARNING: Skipping patterns found in remove.conf")
except LookupError:
logger.debug('Patterns section of remove.conf is empty.')
for c in conf['commands']:
# remember hostname archive path
if c.get('symbolic_name') == 'hostname':
self.hostname_path = os.path.join(
'insights_commands', mangle.mangle_command(c['command']))
rm_commands = rm_conf.get('commands', [])
if c['command'] in rm_commands or c.get('symbolic_name') in rm_commands:
logger.warn("WARNING: Skipping command %s", c['command'])
elif self.mountpoint == "/" or c.get("image"):
cmd_specs = self._parse_command_spec(c, conf['pre_commands'])
for s in cmd_specs:
cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint)
self.archive.add_to_archive(cmd_spec)
for f in conf['files']:
rm_files = rm_conf.get('files', [])
if f['file'] in rm_files or f.get('symbolic_name') in rm_files:
logger.warn("WARNING: Skipping file %s", f['file'])
else:
file_specs = self._parse_file_spec(f)
for s in file_specs:
# filter files post-wildcard parsing
if s['file'] in rm_conf.get('files', []):
logger.warn("WARNING: Skipping file %s", s['file'])
else:
file_spec = InsightsFile(s, exclude, self.mountpoint)
self.archive.add_to_archive(file_spec)
if 'globs' in conf:
for g in conf['globs']:
glob_specs = self._parse_glob_spec(g)
for g in glob_specs:
if g['file'] in rm_conf.get('files', []):
logger.warn("WARNING: Skipping file %s", g)
else:
glob_spec = InsightsFile(g, exclude, self.mountpoint)
self.archive.add_to_archive(glob_spec)
logger.debug('Spec collection finished.')
# collect metadata
logger.debug('Collecting metadata...')
self._write_branch_info(branch_info)
logger.debug('Metadata collection finished.') | [
"def",
"run_collection",
"(",
"self",
",",
"conf",
",",
"rm_conf",
",",
"branch_info",
")",
":",
"if",
"rm_conf",
"is",
"None",
":",
"rm_conf",
"=",
"{",
"}",
"logger",
".",
"debug",
"(",
"'Beginning to run collection spec...'",
")",
"exclude",
"=",
"None",
... | Run specs and collect all the data | [
"Run",
"specs",
"and",
"collect",
"all",
"the",
"data"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/data_collector.py#L148-L203 |
239,711 | RedHatInsights/insights-core | insights/client/data_collector.py | DataCollector.done | def done(self, conf, rm_conf):
"""
Do finalization stuff
"""
if self.config.obfuscate:
cleaner = SOSCleaner(quiet=True)
clean_opts = CleanOptions(
self.config, self.archive.tmp_dir, rm_conf, self.hostname_path)
fresh = cleaner.clean_report(clean_opts, self.archive.archive_dir)
if clean_opts.keyword_file is not None:
os.remove(clean_opts.keyword_file.name)
logger.warn("WARNING: Skipping keywords found in remove.conf")
return fresh[0]
return self.archive.create_tar_file() | python | def done(self, conf, rm_conf):
if self.config.obfuscate:
cleaner = SOSCleaner(quiet=True)
clean_opts = CleanOptions(
self.config, self.archive.tmp_dir, rm_conf, self.hostname_path)
fresh = cleaner.clean_report(clean_opts, self.archive.archive_dir)
if clean_opts.keyword_file is not None:
os.remove(clean_opts.keyword_file.name)
logger.warn("WARNING: Skipping keywords found in remove.conf")
return fresh[0]
return self.archive.create_tar_file() | [
"def",
"done",
"(",
"self",
",",
"conf",
",",
"rm_conf",
")",
":",
"if",
"self",
".",
"config",
".",
"obfuscate",
":",
"cleaner",
"=",
"SOSCleaner",
"(",
"quiet",
"=",
"True",
")",
"clean_opts",
"=",
"CleanOptions",
"(",
"self",
".",
"config",
",",
"... | Do finalization stuff | [
"Do",
"finalization",
"stuff"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/data_collector.py#L205-L218 |
239,712 | RedHatInsights/insights-core | insights/specs/default.py | DefaultSpecs.sap_sid_nr | def sap_sid_nr(broker):
"""
Get the SID and Instance Number
Typical output of saphostctrl_listinstances::
# /usr/sap/hostctrl/exe/saphostctrl -function ListInstances
Inst Info : SR1 - 01 - liuxc-rhel7-hana-ent - 749, patch 418, changelist 1816226
Returns:
(list): List of tuple of SID and Instance Number.
"""
insts = broker[DefaultSpecs.saphostctrl_listinstances].content
hn = broker[DefaultSpecs.hostname].content[0].split('.')[0].strip()
results = set()
for ins in insts:
ins_splits = ins.split(' - ')
# Local Instance
if ins_splits[2].strip() == hn:
# (sid, nr)
results.add((ins_splits[0].split()[-1].lower(), ins_splits[1].strip()))
return list(results) | python | def sap_sid_nr(broker):
insts = broker[DefaultSpecs.saphostctrl_listinstances].content
hn = broker[DefaultSpecs.hostname].content[0].split('.')[0].strip()
results = set()
for ins in insts:
ins_splits = ins.split(' - ')
# Local Instance
if ins_splits[2].strip() == hn:
# (sid, nr)
results.add((ins_splits[0].split()[-1].lower(), ins_splits[1].strip()))
return list(results) | [
"def",
"sap_sid_nr",
"(",
"broker",
")",
":",
"insts",
"=",
"broker",
"[",
"DefaultSpecs",
".",
"saphostctrl_listinstances",
"]",
".",
"content",
"hn",
"=",
"broker",
"[",
"DefaultSpecs",
".",
"hostname",
"]",
".",
"content",
"[",
"0",
"]",
".",
"split",
... | Get the SID and Instance Number
Typical output of saphostctrl_listinstances::
# /usr/sap/hostctrl/exe/saphostctrl -function ListInstances
Inst Info : SR1 - 01 - liuxc-rhel7-hana-ent - 749, patch 418, changelist 1816226
Returns:
(list): List of tuple of SID and Instance Number. | [
"Get",
"the",
"SID",
"and",
"Instance",
"Number"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/specs/default.py#L724-L745 |
239,713 | RedHatInsights/insights-core | insights/util/file_permissions.py | FilePermissions.from_dict | def from_dict(self, dirent):
"""
Create a new FilePermissions object from the given dictionary. This
works with the FileListing parser class, which has already done the
hard work of pulling many of these fields out. We create an object
with all the dictionary keys available as properties, and also split
the ``perms`` string up into owner, group
"""
# Check that we have at least as much data as the __init__ requires
for k in ['perms', 'owner', 'group', 'name', 'dir']:
if k not in dirent:
raise ValueError("Need required key '{k}'".format(k=k))
# Copy all values across
for k in dirent:
setattr(self, k, dirent[k])
# Create perms parts
self.perms_owner = self.perms[0:3]
self.perms_group = self.perms[3:6]
self.perms_other = self.perms[6:9]
return self | python | def from_dict(self, dirent):
# Check that we have at least as much data as the __init__ requires
for k in ['perms', 'owner', 'group', 'name', 'dir']:
if k not in dirent:
raise ValueError("Need required key '{k}'".format(k=k))
# Copy all values across
for k in dirent:
setattr(self, k, dirent[k])
# Create perms parts
self.perms_owner = self.perms[0:3]
self.perms_group = self.perms[3:6]
self.perms_other = self.perms[6:9]
return self | [
"def",
"from_dict",
"(",
"self",
",",
"dirent",
")",
":",
"# Check that we have at least as much data as the __init__ requires",
"for",
"k",
"in",
"[",
"'perms'",
",",
"'owner'",
",",
"'group'",
",",
"'name'",
",",
"'dir'",
"]",
":",
"if",
"k",
"not",
"in",
"d... | Create a new FilePermissions object from the given dictionary. This
works with the FileListing parser class, which has already done the
hard work of pulling many of these fields out. We create an object
with all the dictionary keys available as properties, and also split
the ``perms`` string up into owner, group | [
"Create",
"a",
"new",
"FilePermissions",
"object",
"from",
"the",
"given",
"dictionary",
".",
"This",
"works",
"with",
"the",
"FileListing",
"parser",
"class",
"which",
"has",
"already",
"done",
"the",
"hard",
"work",
"of",
"pulling",
"many",
"of",
"these",
... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/file_permissions.py#L88-L107 |
239,714 | RedHatInsights/insights-core | insights/util/file_permissions.py | FilePermissions.owned_by | def owned_by(self, owner, also_check_group=False):
"""
Checks if the specified user or user and group own the file.
Args:
owner (str): the user (or group) name for which we ask about ownership
also_check_group (bool): if set to True, both user owner and group owner checked
if set to False, only user owner checked
Returns:
bool: True if owner of the file is the specified owner
"""
if also_check_group:
return self.owner == owner and self.group == owner
else:
return self.owner == owner | python | def owned_by(self, owner, also_check_group=False):
if also_check_group:
return self.owner == owner and self.group == owner
else:
return self.owner == owner | [
"def",
"owned_by",
"(",
"self",
",",
"owner",
",",
"also_check_group",
"=",
"False",
")",
":",
"if",
"also_check_group",
":",
"return",
"self",
".",
"owner",
"==",
"owner",
"and",
"self",
".",
"group",
"==",
"owner",
"else",
":",
"return",
"self",
".",
... | Checks if the specified user or user and group own the file.
Args:
owner (str): the user (or group) name for which we ask about ownership
also_check_group (bool): if set to True, both user owner and group owner checked
if set to False, only user owner checked
Returns:
bool: True if owner of the file is the specified owner | [
"Checks",
"if",
"the",
"specified",
"user",
"or",
"user",
"and",
"group",
"own",
"the",
"file",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/file_permissions.py#L109-L124 |
239,715 | RedHatInsights/insights-core | insights/parsers/multipath_conf.py | get_tree | def get_tree(root=None):
"""
This is a helper function to get a multipath configuration component for
your local machine or an archive. It's for use in interactive sessions.
"""
from insights import run
return run(MultipathConfTree, root=root).get(MultipathConfTree) | python | def get_tree(root=None):
from insights import run
return run(MultipathConfTree, root=root).get(MultipathConfTree) | [
"def",
"get_tree",
"(",
"root",
"=",
"None",
")",
":",
"from",
"insights",
"import",
"run",
"return",
"run",
"(",
"MultipathConfTree",
",",
"root",
"=",
"root",
")",
".",
"get",
"(",
"MultipathConfTree",
")"
] | This is a helper function to get a multipath configuration component for
your local machine or an archive. It's for use in interactive sessions. | [
"This",
"is",
"a",
"helper",
"function",
"to",
"get",
"a",
"multipath",
"configuration",
"component",
"for",
"your",
"local",
"machine",
"or",
"an",
"archive",
".",
"It",
"s",
"for",
"use",
"in",
"interactive",
"sessions",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/multipath_conf.py#L188-L194 |
239,716 | RedHatInsights/insights-core | insights/client/support.py | InsightsSupport._support_diag_dump | def _support_diag_dump(self):
'''
Collect log info for debug
'''
# check insights config
cfg_block = []
pconn = InsightsConnection(self.config)
logger.info('Insights version: %s', get_nvr())
reg_check = registration_check(pconn)
cfg_block.append('Registration check:')
for key in reg_check:
cfg_block.append(key + ': ' + str(reg_check[key]))
lastupload = 'never'
if os.path.isfile(constants.lastupload_file):
with open(constants.lastupload_file) as upl_file:
lastupload = upl_file.readline().strip()
cfg_block.append('\nLast successful upload was ' + lastupload)
cfg_block.append('auto_config: ' + str(self.config.auto_config))
if self.config.proxy:
obfuscated_proxy = re.sub(r'(.*)(:)(.*)(@.*)',
r'\1\2********\4',
self.config.proxy)
else:
obfuscated_proxy = 'None'
cfg_block.append('proxy: ' + obfuscated_proxy)
logger.info('\n'.join(cfg_block))
logger.info('python-requests: %s', requests.__version__)
succ = pconn.test_connection()
if succ == 0:
logger.info('Connection test: PASS\n')
else:
logger.info('Connection test: FAIL\n')
# run commands
commands = ['uname -a',
'cat /etc/redhat-release',
'env',
'sestatus',
'subscription-manager identity',
'systemctl cat insights-client.timer',
'systemctl cat insights-client.service',
'systemctl status insights-client.timer',
'systemctl status insights-client.service']
for cmd in commands:
logger.info("Running command: %s", cmd)
try:
proc = Popen(
shlex.split(cmd), shell=False, stdout=PIPE, stderr=STDOUT,
close_fds=True)
stdout, stderr = proc.communicate()
except OSError as o:
if 'systemctl' not in cmd:
# suppress output for systemctl cmd failures
logger.info('Error running command "%s": %s', cmd, o)
except Exception as e:
# unknown error
logger.info("Process failed: %s", e)
logger.info("Process output: \n%s", stdout)
# check available disk space for /var/tmp
tmp_dir = '/var/tmp'
dest_dir_stat = os.statvfs(tmp_dir)
dest_dir_size = (dest_dir_stat.f_bavail * dest_dir_stat.f_frsize)
logger.info('Available space in %s:\t%s bytes\t%.1f 1K-blocks\t%.1f MB',
tmp_dir, dest_dir_size,
dest_dir_size / 1024.0,
(dest_dir_size / 1024.0) / 1024.0) | python | def _support_diag_dump(self):
'''
Collect log info for debug
'''
# check insights config
cfg_block = []
pconn = InsightsConnection(self.config)
logger.info('Insights version: %s', get_nvr())
reg_check = registration_check(pconn)
cfg_block.append('Registration check:')
for key in reg_check:
cfg_block.append(key + ': ' + str(reg_check[key]))
lastupload = 'never'
if os.path.isfile(constants.lastupload_file):
with open(constants.lastupload_file) as upl_file:
lastupload = upl_file.readline().strip()
cfg_block.append('\nLast successful upload was ' + lastupload)
cfg_block.append('auto_config: ' + str(self.config.auto_config))
if self.config.proxy:
obfuscated_proxy = re.sub(r'(.*)(:)(.*)(@.*)',
r'\1\2********\4',
self.config.proxy)
else:
obfuscated_proxy = 'None'
cfg_block.append('proxy: ' + obfuscated_proxy)
logger.info('\n'.join(cfg_block))
logger.info('python-requests: %s', requests.__version__)
succ = pconn.test_connection()
if succ == 0:
logger.info('Connection test: PASS\n')
else:
logger.info('Connection test: FAIL\n')
# run commands
commands = ['uname -a',
'cat /etc/redhat-release',
'env',
'sestatus',
'subscription-manager identity',
'systemctl cat insights-client.timer',
'systemctl cat insights-client.service',
'systemctl status insights-client.timer',
'systemctl status insights-client.service']
for cmd in commands:
logger.info("Running command: %s", cmd)
try:
proc = Popen(
shlex.split(cmd), shell=False, stdout=PIPE, stderr=STDOUT,
close_fds=True)
stdout, stderr = proc.communicate()
except OSError as o:
if 'systemctl' not in cmd:
# suppress output for systemctl cmd failures
logger.info('Error running command "%s": %s', cmd, o)
except Exception as e:
# unknown error
logger.info("Process failed: %s", e)
logger.info("Process output: \n%s", stdout)
# check available disk space for /var/tmp
tmp_dir = '/var/tmp'
dest_dir_stat = os.statvfs(tmp_dir)
dest_dir_size = (dest_dir_stat.f_bavail * dest_dir_stat.f_frsize)
logger.info('Available space in %s:\t%s bytes\t%.1f 1K-blocks\t%.1f MB',
tmp_dir, dest_dir_size,
dest_dir_size / 1024.0,
(dest_dir_size / 1024.0) / 1024.0) | [
"def",
"_support_diag_dump",
"(",
"self",
")",
":",
"# check insights config",
"cfg_block",
"=",
"[",
"]",
"pconn",
"=",
"InsightsConnection",
"(",
"self",
".",
"config",
")",
"logger",
".",
"info",
"(",
"'Insights version: %s'",
",",
"get_nvr",
"(",
")",
")",... | Collect log info for debug | [
"Collect",
"log",
"info",
"for",
"debug"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/support.py#L94-L165 |
239,717 | RedHatInsights/insights-core | insights/core/filters.py | add_filter | def add_filter(ds, patterns):
"""
Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters.
"""
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
delegate = dr.get_delegate(ds)
if delegate.raw:
raise Exception("Filters aren't applicable to raw datasources.")
if not delegate.filterable:
raise Exception("Filters aren't applicable to %s." % dr.get_name(ds))
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.") | python | def add_filter(ds, patterns):
if not plugins.is_datasource(ds):
raise Exception("Filters are applicable only to datasources.")
delegate = dr.get_delegate(ds)
if delegate.raw:
raise Exception("Filters aren't applicable to raw datasources.")
if not delegate.filterable:
raise Exception("Filters aren't applicable to %s." % dr.get_name(ds))
if ds in _CACHE:
del _CACHE[ds]
if isinstance(patterns, six.string_types):
FILTERS[ds].add(patterns)
elif isinstance(patterns, list):
FILTERS[ds] |= set(patterns)
elif isinstance(patterns, set):
FILTERS[ds] |= patterns
else:
raise TypeError("patterns must be string, list, or set.") | [
"def",
"add_filter",
"(",
"ds",
",",
"patterns",
")",
":",
"if",
"not",
"plugins",
".",
"is_datasource",
"(",
"ds",
")",
":",
"raise",
"Exception",
"(",
"\"Filters are applicable only to datasources.\"",
")",
"delegate",
"=",
"dr",
".",
"get_delegate",
"(",
"d... | Add a filter or list of filters to a datasource. A filter is a simple
string, and it matches if it is contained anywhere within a line.
Args:
ds (@datasource component): The datasource to filter
patterns (str, [str]): A string, list of strings, or set of strings to
add to the datasource's filters. | [
"Add",
"a",
"filter",
"or",
"list",
"of",
"filters",
"to",
"a",
"datasource",
".",
"A",
"filter",
"is",
"a",
"simple",
"string",
"and",
"it",
"matches",
"if",
"it",
"is",
"contained",
"anywhere",
"within",
"a",
"line",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L49-L79 |
239,718 | RedHatInsights/insights-core | insights/core/filters.py | get_filters | def get_filters(component):
"""
Get the set of filters for the given datasource.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to
that implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
``insights.specs`` for those classes.
Args:
component (a datasource): The target datasource
Returns:
set: The set of filters defined for the datasource
"""
def inner(c, filters=None):
filters = filters or set()
if not ENABLED:
return filters
if not plugins.is_datasource(c):
return filters
if c in FILTERS:
filters |= FILTERS[c]
for d in dr.get_dependents(c):
filters |= inner(d, filters)
return filters
if component not in _CACHE:
_CACHE[component] = inner(component)
return _CACHE[component] | python | def get_filters(component):
def inner(c, filters=None):
filters = filters or set()
if not ENABLED:
return filters
if not plugins.is_datasource(c):
return filters
if c in FILTERS:
filters |= FILTERS[c]
for d in dr.get_dependents(c):
filters |= inner(d, filters)
return filters
if component not in _CACHE:
_CACHE[component] = inner(component)
return _CACHE[component] | [
"def",
"get_filters",
"(",
"component",
")",
":",
"def",
"inner",
"(",
"c",
",",
"filters",
"=",
"None",
")",
":",
"filters",
"=",
"filters",
"or",
"set",
"(",
")",
"if",
"not",
"ENABLED",
":",
"return",
"filters",
"if",
"not",
"plugins",
".",
"is_da... | Get the set of filters for the given datasource.
Filters added to a ``RegistryPoint`` will be applied to all datasources that
implement it. Filters added to a datasource implementation apply only to
that implementation.
For example, a filter added to ``Specs.ps_auxww`` will apply to
``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
``insights.specs`` for those classes.
Args:
component (a datasource): The target datasource
Returns:
set: The set of filters defined for the datasource | [
"Get",
"the",
"set",
"of",
"filters",
"for",
"the",
"given",
"datasource",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L82-L119 |
239,719 | RedHatInsights/insights-core | insights/core/filters.py | apply_filters | def apply_filters(target, lines):
"""
Applys filters to the lines of a datasource. This function is used only in
integration tests. Filters are applied in an equivalent but more performant
way at run time.
"""
filters = get_filters(target)
if filters:
for l in lines:
if any(f in l for f in filters):
yield l
else:
for l in lines:
yield l | python | def apply_filters(target, lines):
filters = get_filters(target)
if filters:
for l in lines:
if any(f in l for f in filters):
yield l
else:
for l in lines:
yield l | [
"def",
"apply_filters",
"(",
"target",
",",
"lines",
")",
":",
"filters",
"=",
"get_filters",
"(",
"target",
")",
"if",
"filters",
":",
"for",
"l",
"in",
"lines",
":",
"if",
"any",
"(",
"f",
"in",
"l",
"for",
"f",
"in",
"filters",
")",
":",
"yield"... | Applys filters to the lines of a datasource. This function is used only in
integration tests. Filters are applied in an equivalent but more performant
way at run time. | [
"Applys",
"filters",
"to",
"the",
"lines",
"of",
"a",
"datasource",
".",
"This",
"function",
"is",
"used",
"only",
"in",
"integration",
"tests",
".",
"Filters",
"are",
"applied",
"in",
"an",
"equivalent",
"but",
"more",
"performant",
"way",
"at",
"run",
"t... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L122-L135 |
239,720 | RedHatInsights/insights-core | insights/core/filters.py | loads | def loads(string):
"""Loads the filters dictionary given a string."""
d = _loads(string)
for k, v in d.items():
FILTERS[dr.get_component(k) or k] = set(v) | python | def loads(string):
d = _loads(string)
for k, v in d.items():
FILTERS[dr.get_component(k) or k] = set(v) | [
"def",
"loads",
"(",
"string",
")",
":",
"d",
"=",
"_loads",
"(",
"string",
")",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"FILTERS",
"[",
"dr",
".",
"get_component",
"(",
"k",
")",
"or",
"k",
"]",
"=",
"set",
"(",
"v",
... | Loads the filters dictionary given a string. | [
"Loads",
"the",
"filters",
"dictionary",
"given",
"a",
"string",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L143-L147 |
239,721 | RedHatInsights/insights-core | insights/core/filters.py | load | def load(stream=None):
"""
Loads filters from a stream, normally an open file. If one is
not passed, filters are loaded from a default location within
the project.
"""
if stream:
loads(stream.read())
else:
data = pkgutil.get_data(insights.__name__, _filename)
return loads(data) if data else None | python | def load(stream=None):
if stream:
loads(stream.read())
else:
data = pkgutil.get_data(insights.__name__, _filename)
return loads(data) if data else None | [
"def",
"load",
"(",
"stream",
"=",
"None",
")",
":",
"if",
"stream",
":",
"loads",
"(",
"stream",
".",
"read",
"(",
")",
")",
"else",
":",
"data",
"=",
"pkgutil",
".",
"get_data",
"(",
"insights",
".",
"__name__",
",",
"_filename",
")",
"return",
"... | Loads filters from a stream, normally an open file. If one is
not passed, filters are loaded from a default location within
the project. | [
"Loads",
"filters",
"from",
"a",
"stream",
"normally",
"an",
"open",
"file",
".",
"If",
"one",
"is",
"not",
"passed",
"filters",
"are",
"loaded",
"from",
"a",
"default",
"location",
"within",
"the",
"project",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L150-L160 |
239,722 | RedHatInsights/insights-core | insights/core/filters.py | dumps | def dumps():
"""Returns a string representation of the FILTERS dictionary."""
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d) | python | def dumps():
d = {}
for k, v in FILTERS.items():
d[dr.get_name(k)] = list(v)
return _dumps(d) | [
"def",
"dumps",
"(",
")",
":",
"d",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"FILTERS",
".",
"items",
"(",
")",
":",
"d",
"[",
"dr",
".",
"get_name",
"(",
"k",
")",
"]",
"=",
"list",
"(",
"v",
")",
"return",
"_dumps",
"(",
"d",
")"
] | Returns a string representation of the FILTERS dictionary. | [
"Returns",
"a",
"string",
"representation",
"of",
"the",
"FILTERS",
"dictionary",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L163-L168 |
239,723 | RedHatInsights/insights-core | insights/core/filters.py | dump | def dump(stream=None):
"""
Dumps a string representation of `FILTERS` to a stream, normally an
open file. If none is passed, `FILTERS` is dumped to a default location
within the project.
"""
if stream:
stream.write(dumps())
else:
path = os.path.join(os.path.dirname(insights.__file__), _filename)
with open(path, "wu") as f:
f.write(dumps()) | python | def dump(stream=None):
if stream:
stream.write(dumps())
else:
path = os.path.join(os.path.dirname(insights.__file__), _filename)
with open(path, "wu") as f:
f.write(dumps()) | [
"def",
"dump",
"(",
"stream",
"=",
"None",
")",
":",
"if",
"stream",
":",
"stream",
".",
"write",
"(",
"dumps",
"(",
")",
")",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"insights",
".... | Dumps a string representation of `FILTERS` to a stream, normally an
open file. If none is passed, `FILTERS` is dumped to a default location
within the project. | [
"Dumps",
"a",
"string",
"representation",
"of",
"FILTERS",
"to",
"a",
"stream",
"normally",
"an",
"open",
"file",
".",
"If",
"none",
"is",
"passed",
"FILTERS",
"is",
"dumped",
"to",
"a",
"default",
"location",
"within",
"the",
"project",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/filters.py#L171-L182 |
239,724 | RedHatInsights/insights-core | insights/parsers/grub_conf.py | _parse_script | def _parse_script(list, line, line_iter):
"""
Eliminate any bash script contained in the grub v2 configuration
"""
ifIdx = 0
while (True):
line = next(line_iter)
if line.startswith("fi"):
if ifIdx == 0:
return
ifIdx -= 1
elif line.startswith("if"):
ifIdx += 1 | python | def _parse_script(list, line, line_iter):
ifIdx = 0
while (True):
line = next(line_iter)
if line.startswith("fi"):
if ifIdx == 0:
return
ifIdx -= 1
elif line.startswith("if"):
ifIdx += 1 | [
"def",
"_parse_script",
"(",
"list",
",",
"line",
",",
"line_iter",
")",
":",
"ifIdx",
"=",
"0",
"while",
"(",
"True",
")",
":",
"line",
"=",
"next",
"(",
"line_iter",
")",
"if",
"line",
".",
"startswith",
"(",
"\"fi\"",
")",
":",
"if",
"ifIdx",
"=... | Eliminate any bash script contained in the grub v2 configuration | [
"Eliminate",
"any",
"bash",
"script",
"contained",
"in",
"the",
"grub",
"v2",
"configuration"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/grub_conf.py#L397-L409 |
239,725 | RedHatInsights/insights-core | insights/parsers/grub_conf.py | _parse_title | def _parse_title(line_iter, cur_line, conf):
"""
Parse "title" in grub v1 config
"""
title = []
conf['title'].append(title)
title.append(('title_name', cur_line.split('title', 1)[1].strip()))
while (True):
line = next(line_iter)
if line.startswith("title "):
return line
cmd, opt = _parse_cmd(line)
title.append((cmd, opt)) | python | def _parse_title(line_iter, cur_line, conf):
title = []
conf['title'].append(title)
title.append(('title_name', cur_line.split('title', 1)[1].strip()))
while (True):
line = next(line_iter)
if line.startswith("title "):
return line
cmd, opt = _parse_cmd(line)
title.append((cmd, opt)) | [
"def",
"_parse_title",
"(",
"line_iter",
",",
"cur_line",
",",
"conf",
")",
":",
"title",
"=",
"[",
"]",
"conf",
"[",
"'title'",
"]",
".",
"append",
"(",
"title",
")",
"title",
".",
"append",
"(",
"(",
"'title_name'",
",",
"cur_line",
".",
"split",
"... | Parse "title" in grub v1 config | [
"Parse",
"title",
"in",
"grub",
"v1",
"config"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/grub_conf.py#L447-L460 |
239,726 | RedHatInsights/insights-core | insights/parsers/grub_conf.py | GrubConfig.is_kdump_iommu_enabled | def is_kdump_iommu_enabled(self):
"""
Does any kernel have 'intel_iommu=on' set?
Returns:
(bool): ``True`` when 'intel_iommu=on' is set, otherwise returns ``False``
"""
for line in self._boot_entries:
if line.cmdline and IOMMU in line.cmdline:
return True
return False | python | def is_kdump_iommu_enabled(self):
for line in self._boot_entries:
if line.cmdline and IOMMU in line.cmdline:
return True
return False | [
"def",
"is_kdump_iommu_enabled",
"(",
"self",
")",
":",
"for",
"line",
"in",
"self",
".",
"_boot_entries",
":",
"if",
"line",
".",
"cmdline",
"and",
"IOMMU",
"in",
"line",
".",
"cmdline",
":",
"return",
"True",
"return",
"False"
] | Does any kernel have 'intel_iommu=on' set?
Returns:
(bool): ``True`` when 'intel_iommu=on' is set, otherwise returns ``False`` | [
"Does",
"any",
"kernel",
"have",
"intel_iommu",
"=",
"on",
"set?"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/grub_conf.py#L177-L188 |
239,727 | RedHatInsights/insights-core | insights/parsers/grub_conf.py | GrubConfig.kernel_initrds | def kernel_initrds(self):
"""
Get the `kernel` and `initrd` files referenced in GRUB configuration files
Returns:
(dict): Returns a dict of the `kernel` and `initrd` files referenced
in GRUB configuration files
"""
kernels = []
initrds = []
name_values = [(k, v) for k, v in self.data.get('configs', [])]
for value in self.data.get('title', []) + self.data.get('menuentry', []):
name_values.extend(value)
for name, value in name_values:
if name.startswith('module'):
if 'vmlinuz' in value:
kernels.append(_parse_kernel_initrds_value(value))
elif 'initrd' in value or 'initramfs' in value:
initrds.append(_parse_kernel_initrds_value(value))
elif (name.startswith(('kernel', 'linux'))):
if 'ipxe.lkrn' in value:
# Machine PXE boots the kernel, assume all is ok
return {}
elif 'xen.gz' not in value:
kernels.append(_parse_kernel_initrds_value(value))
elif name.startswith('initrd') or name.startswith('initrd16'):
initrds.append(_parse_kernel_initrds_value(value))
return {GRUB_KERNELS: kernels, GRUB_INITRDS: initrds} | python | def kernel_initrds(self):
kernels = []
initrds = []
name_values = [(k, v) for k, v in self.data.get('configs', [])]
for value in self.data.get('title', []) + self.data.get('menuentry', []):
name_values.extend(value)
for name, value in name_values:
if name.startswith('module'):
if 'vmlinuz' in value:
kernels.append(_parse_kernel_initrds_value(value))
elif 'initrd' in value or 'initramfs' in value:
initrds.append(_parse_kernel_initrds_value(value))
elif (name.startswith(('kernel', 'linux'))):
if 'ipxe.lkrn' in value:
# Machine PXE boots the kernel, assume all is ok
return {}
elif 'xen.gz' not in value:
kernels.append(_parse_kernel_initrds_value(value))
elif name.startswith('initrd') or name.startswith('initrd16'):
initrds.append(_parse_kernel_initrds_value(value))
return {GRUB_KERNELS: kernels, GRUB_INITRDS: initrds} | [
"def",
"kernel_initrds",
"(",
"self",
")",
":",
"kernels",
"=",
"[",
"]",
"initrds",
"=",
"[",
"]",
"name_values",
"=",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"data",
".",
"get",
"(",
"'configs'",
",",
"[",
"]",
... | Get the `kernel` and `initrd` files referenced in GRUB configuration files
Returns:
(dict): Returns a dict of the `kernel` and `initrd` files referenced
in GRUB configuration files | [
"Get",
"the",
"kernel",
"and",
"initrd",
"files",
"referenced",
"in",
"GRUB",
"configuration",
"files"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/grub_conf.py#L192-L222 |
239,728 | RedHatInsights/insights-core | insights/core/serde.py | serializer | def serializer(_type):
"""
Decorator for serializers.
A serializer should accept two parameters: An object and a path which is
a directory on the filesystem where supplementary data can be stored. This
is most often useful for datasources. It should return a dictionary version
of the original object that contains only elements that can be serialized
to json.
"""
def inner(func):
name = dr.get_name(_type)
if name in SERIALIZERS:
msg = "%s already has a serializer registered: %s"
raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))
SERIALIZERS[name] = func
return func
return inner | python | def serializer(_type):
def inner(func):
name = dr.get_name(_type)
if name in SERIALIZERS:
msg = "%s already has a serializer registered: %s"
raise Exception(msg % (name, dr.get_name(SERIALIZERS[name])))
SERIALIZERS[name] = func
return func
return inner | [
"def",
"serializer",
"(",
"_type",
")",
":",
"def",
"inner",
"(",
"func",
")",
":",
"name",
"=",
"dr",
".",
"get_name",
"(",
"_type",
")",
"if",
"name",
"in",
"SERIALIZERS",
":",
"msg",
"=",
"\"%s already has a serializer registered: %s\"",
"raise",
"Excepti... | Decorator for serializers.
A serializer should accept two parameters: An object and a path which is
a directory on the filesystem where supplementary data can be stored. This
is most often useful for datasources. It should return a dictionary version
of the original object that contains only elements that can be serialized
to json. | [
"Decorator",
"for",
"serializers",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/serde.py#L26-L44 |
239,729 | RedHatInsights/insights-core | insights/core/serde.py | deserializer | def deserializer(_type):
"""
Decorator for deserializers.
A deserializer should accept three parameters: A type, a dictionary, and a
path that may contain supplementary data stored by its paired serializer.
If the serializer stores supplementary data, the relative path to it should
be somewhere in the dict of the second parameter.
"""
def inner(func):
name = dr.get_name(_type)
if name in DESERIALIZERS:
msg = "%s already has a deserializer registered: %s"
raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))
DESERIALIZERS[name] = (_type, func)
return func
return inner | python | def deserializer(_type):
def inner(func):
name = dr.get_name(_type)
if name in DESERIALIZERS:
msg = "%s already has a deserializer registered: %s"
raise Exception(msg % (dr.get_name(name), dr.get_name(DESERIALIZERS[name])))
DESERIALIZERS[name] = (_type, func)
return func
return inner | [
"def",
"deserializer",
"(",
"_type",
")",
":",
"def",
"inner",
"(",
"func",
")",
":",
"name",
"=",
"dr",
".",
"get_name",
"(",
"_type",
")",
"if",
"name",
"in",
"DESERIALIZERS",
":",
"msg",
"=",
"\"%s already has a deserializer registered: %s\"",
"raise",
"E... | Decorator for deserializers.
A deserializer should accept three parameters: A type, a dictionary, and a
path that may contain supplementary data stored by its paired serializer.
If the serializer stores supplementary data, the relative path to it should
be somewhere in the dict of the second parameter. | [
"Decorator",
"for",
"deserializers",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/serde.py#L47-L64 |
239,730 | RedHatInsights/insights-core | insights/core/serde.py | Hydration.hydrate | def hydrate(self, broker=None):
"""
Loads a Broker from a previously saved one. A Broker is created if one
isn't provided.
"""
broker = broker or dr.Broker()
for path in glob(os.path.join(self.meta_data, "*")):
try:
with open(path) as f:
doc = ser.load(f)
res = self._hydrate_one(doc)
comp, results, exec_time, ser_time = res
if results:
broker[comp] = results
broker.exec_times[comp] = exec_time + ser_time
except Exception as ex:
log.warning(ex)
return broker | python | def hydrate(self, broker=None):
broker = broker or dr.Broker()
for path in glob(os.path.join(self.meta_data, "*")):
try:
with open(path) as f:
doc = ser.load(f)
res = self._hydrate_one(doc)
comp, results, exec_time, ser_time = res
if results:
broker[comp] = results
broker.exec_times[comp] = exec_time + ser_time
except Exception as ex:
log.warning(ex)
return broker | [
"def",
"hydrate",
"(",
"self",
",",
"broker",
"=",
"None",
")",
":",
"broker",
"=",
"broker",
"or",
"dr",
".",
"Broker",
"(",
")",
"for",
"path",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"meta_data",
",",
"\"*\"",
")",... | Loads a Broker from a previously saved one. A Broker is created if one
isn't provided. | [
"Loads",
"a",
"Broker",
"from",
"a",
"previously",
"saved",
"one",
".",
"A",
"Broker",
"is",
"created",
"if",
"one",
"isn",
"t",
"provided",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/serde.py#L144-L161 |
239,731 | RedHatInsights/insights-core | insights/core/serde.py | Hydration.dehydrate | def dehydrate(self, comp, broker):
"""
Saves a component in the given broker to the file system.
"""
if not self.meta_data:
raise Exception("Hydration meta_path not set. Can't dehydrate.")
if not self.created:
fs.ensure_path(self.meta_data, mode=0o770)
if self.data:
fs.ensure_path(self.data, mode=0o770)
self.created = True
c = comp
doc = None
try:
name = dr.get_name(c)
value = broker.get(c)
errors = [t for e in broker.exceptions.get(c, [])
for t in broker.tracebacks[e]]
doc = {
"name": name,
"exec_time": broker.exec_times.get(c),
"errors": errors
}
try:
start = time.time()
doc["results"] = marshal(value, root=self.data, pool=self.pool)
except Exception:
errors.append(traceback.format_exc())
log.debug(traceback.format_exc())
doc["results"] = None
finally:
doc["ser_time"] = time.time() - start
except Exception as ex:
log.exception(ex)
else:
if doc is not None and (doc["results"] or doc["errors"]):
try:
path = os.path.join(self.meta_data, name + "." + self.ser_name)
with open(path, "w") as f:
ser.dump(doc, f)
except Exception as boom:
log.error("Could not serialize %s to %s: %r" % (name, self.ser_name, boom))
if path:
fs.remove(path) | python | def dehydrate(self, comp, broker):
if not self.meta_data:
raise Exception("Hydration meta_path not set. Can't dehydrate.")
if not self.created:
fs.ensure_path(self.meta_data, mode=0o770)
if self.data:
fs.ensure_path(self.data, mode=0o770)
self.created = True
c = comp
doc = None
try:
name = dr.get_name(c)
value = broker.get(c)
errors = [t for e in broker.exceptions.get(c, [])
for t in broker.tracebacks[e]]
doc = {
"name": name,
"exec_time": broker.exec_times.get(c),
"errors": errors
}
try:
start = time.time()
doc["results"] = marshal(value, root=self.data, pool=self.pool)
except Exception:
errors.append(traceback.format_exc())
log.debug(traceback.format_exc())
doc["results"] = None
finally:
doc["ser_time"] = time.time() - start
except Exception as ex:
log.exception(ex)
else:
if doc is not None and (doc["results"] or doc["errors"]):
try:
path = os.path.join(self.meta_data, name + "." + self.ser_name)
with open(path, "w") as f:
ser.dump(doc, f)
except Exception as boom:
log.error("Could not serialize %s to %s: %r" % (name, self.ser_name, boom))
if path:
fs.remove(path) | [
"def",
"dehydrate",
"(",
"self",
",",
"comp",
",",
"broker",
")",
":",
"if",
"not",
"self",
".",
"meta_data",
":",
"raise",
"Exception",
"(",
"\"Hydration meta_path not set. Can't dehydrate.\"",
")",
"if",
"not",
"self",
".",
"created",
":",
"fs",
".",
"ensu... | Saves a component in the given broker to the file system. | [
"Saves",
"a",
"component",
"in",
"the",
"given",
"broker",
"to",
"the",
"file",
"system",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/serde.py#L163-L209 |
239,732 | RedHatInsights/insights-core | insights/core/serde.py | Hydration.make_persister | def make_persister(self, to_persist):
"""
Returns a function that hydrates components as they are evaluated. The
function should be registered as an observer on a Broker just before
execution.
Args:
to_persist (set): Set of components to persist. Skip everything
else.
"""
if not self.meta_data:
raise Exception("Root not set. Can't create persister.")
def persister(c, broker):
if c in to_persist:
self.dehydrate(c, broker)
return persister | python | def make_persister(self, to_persist):
if not self.meta_data:
raise Exception("Root not set. Can't create persister.")
def persister(c, broker):
if c in to_persist:
self.dehydrate(c, broker)
return persister | [
"def",
"make_persister",
"(",
"self",
",",
"to_persist",
")",
":",
"if",
"not",
"self",
".",
"meta_data",
":",
"raise",
"Exception",
"(",
"\"Root not set. Can't create persister.\"",
")",
"def",
"persister",
"(",
"c",
",",
"broker",
")",
":",
"if",
"c",
"in"... | Returns a function that hydrates components as they are evaluated. The
function should be registered as an observer on a Broker just before
execution.
Args:
to_persist (set): Set of components to persist. Skip everything
else. | [
"Returns",
"a",
"function",
"that",
"hydrates",
"components",
"as",
"they",
"are",
"evaluated",
".",
"The",
"function",
"should",
"be",
"registered",
"as",
"an",
"observer",
"on",
"a",
"Broker",
"just",
"before",
"execution",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/serde.py#L211-L228 |
239,733 | RedHatInsights/insights-core | insights/parsers/lvm.py | map_keys | def map_keys(pvs, keys):
"""
Add human readable key names to dictionary while leaving any existing key names.
"""
rs = []
for pv in pvs:
r = dict((v, None) for k, v in keys.items())
for k, v in pv.items():
if k in keys:
r[keys[k]] = v
r[k] = v
rs.append(r)
return rs | python | def map_keys(pvs, keys):
rs = []
for pv in pvs:
r = dict((v, None) for k, v in keys.items())
for k, v in pv.items():
if k in keys:
r[keys[k]] = v
r[k] = v
rs.append(r)
return rs | [
"def",
"map_keys",
"(",
"pvs",
",",
"keys",
")",
":",
"rs",
"=",
"[",
"]",
"for",
"pv",
"in",
"pvs",
":",
"r",
"=",
"dict",
"(",
"(",
"v",
",",
"None",
")",
"for",
"k",
",",
"v",
"in",
"keys",
".",
"items",
"(",
")",
")",
"for",
"k",
",",... | Add human readable key names to dictionary while leaving any existing key names. | [
"Add",
"human",
"readable",
"key",
"names",
"to",
"dictionary",
"while",
"leaving",
"any",
"existing",
"key",
"names",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/lvm.py#L43-L55 |
239,734 | RedHatInsights/insights-core | insights/configtree/__init__.py | from_dict | def from_dict(dct):
""" Convert a dictionary into a configtree. """
def inner(d):
results = []
for name, v in d.items():
if isinstance(v, dict):
results.append(Section(name=name, children=from_dict(v)))
elif isinstance(v, list):
if not any(isinstance(i, dict) for i in v):
results.append(Directive(name=name, attrs=v))
else:
for i in v:
if isinstance(i, dict):
results.append(Section(name=name, children=from_dict(i)))
elif isinstance(i, list):
results.append(Directive(name=name, attrs=i))
else:
results.append(Directive(name=name, attrs=[i]))
else:
results.append(Directive(name, attrs=[v]))
return results
return Root(children=inner(dct)) | python | def from_dict(dct):
def inner(d):
results = []
for name, v in d.items():
if isinstance(v, dict):
results.append(Section(name=name, children=from_dict(v)))
elif isinstance(v, list):
if not any(isinstance(i, dict) for i in v):
results.append(Directive(name=name, attrs=v))
else:
for i in v:
if isinstance(i, dict):
results.append(Section(name=name, children=from_dict(i)))
elif isinstance(i, list):
results.append(Directive(name=name, attrs=i))
else:
results.append(Directive(name=name, attrs=[i]))
else:
results.append(Directive(name, attrs=[v]))
return results
return Root(children=inner(dct)) | [
"def",
"from_dict",
"(",
"dct",
")",
":",
"def",
"inner",
"(",
"d",
")",
":",
"results",
"=",
"[",
"]",
"for",
"name",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"results",
".",
"app... | Convert a dictionary into a configtree. | [
"Convert",
"a",
"dictionary",
"into",
"a",
"configtree",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L348-L369 |
239,735 | RedHatInsights/insights-core | insights/configtree/__init__.py | __or | def __or(funcs, args):
""" Support list sugar for "or" of two predicates. Used inside `select`. """
results = []
for f in funcs:
result = f(args)
if result:
results.extend(result)
return results | python | def __or(funcs, args):
results = []
for f in funcs:
result = f(args)
if result:
results.extend(result)
return results | [
"def",
"__or",
"(",
"funcs",
",",
"args",
")",
":",
"results",
"=",
"[",
"]",
"for",
"f",
"in",
"funcs",
":",
"result",
"=",
"f",
"(",
"args",
")",
"if",
"result",
":",
"results",
".",
"extend",
"(",
"result",
")",
"return",
"results"
] | Support list sugar for "or" of two predicates. Used inside `select`. | [
"Support",
"list",
"sugar",
"for",
"or",
"of",
"two",
"predicates",
".",
"Used",
"inside",
"select",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L582-L589 |
239,736 | RedHatInsights/insights-core | insights/configtree/__init__.py | BinaryBool | def BinaryBool(pred):
""" Lifts predicates that take an argument into the DSL. """
class Predicate(Bool):
def __init__(self, value, ignore_case=False):
self.value = caseless(value) if ignore_case else value
self.ignore_case = ignore_case
def __call__(self, data):
if not isinstance(data, list):
data = [data]
for d in data:
try:
if pred(caseless(d) if self.ignore_case else d, self.value):
return True
except:
pass
return False
return Predicate | python | def BinaryBool(pred):
class Predicate(Bool):
def __init__(self, value, ignore_case=False):
self.value = caseless(value) if ignore_case else value
self.ignore_case = ignore_case
def __call__(self, data):
if not isinstance(data, list):
data = [data]
for d in data:
try:
if pred(caseless(d) if self.ignore_case else d, self.value):
return True
except:
pass
return False
return Predicate | [
"def",
"BinaryBool",
"(",
"pred",
")",
":",
"class",
"Predicate",
"(",
"Bool",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"value",
",",
"ignore_case",
"=",
"False",
")",
":",
"self",
".",
"value",
"=",
"caseless",
"(",
"value",
")",
"if",
"ignore... | Lifts predicates that take an argument into the DSL. | [
"Lifts",
"predicates",
"that",
"take",
"an",
"argument",
"into",
"the",
"DSL",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L652-L669 |
239,737 | RedHatInsights/insights-core | insights/configtree/__init__.py | select | def select(*queries, **kwargs):
"""
Builds a function that will execute the specified queries against a list of
Nodes.
"""
def make_query(*args):
def simple_query(nodes):
if len(args) == 0:
return nodes
pred = args[0]
results = []
if isinstance(pred, list):
funcs = [make_query(q) for q in pred]
return __or(funcs, nodes)
elif isinstance(pred, tuple):
name, attrs = pred[0], pred[1:]
name_pred = __make_name_pred(name)
attrs_pred = __make_attrs_pred(attrs)
for n in nodes:
if name_pred(n.name) and attrs_pred(n.attrs):
results.append(n)
else:
name_pred = __make_name_pred(pred)
for n in nodes:
if name_pred(n.name):
results.append(n)
return results
if len(args) > 1:
return __compose(make_query(*args[1:]), simple_query)
return simple_query
def deep_query(query, nodes):
""" Slide the query down the branches. """
def inner(children):
results = []
for c in children:
if query([c]):
results.append(c)
results.extend(inner(c.children))
return results
return inner(nodes)
def unique(roots):
seen = set()
results = []
for r in roots:
if r not in seen:
seen.add(r)
results.append(r)
return results
def compiled_query(nodes):
"""
This is the compiled query that can be run against a configuration.
"""
query = make_query(*queries)
roots = kwargs.get("roots", True)
if kwargs.get("deep", False):
results = deep_query(query, nodes)
if roots:
results = unique([r.root for r in results])
elif roots:
results = unique([n.root for n in query(nodes)])
else:
results = query(nodes)
one = kwargs.get("one")
if one is None:
return SearchResult(children=results)
return results[one] if results else None
return compiled_query | python | def select(*queries, **kwargs):
def make_query(*args):
def simple_query(nodes):
if len(args) == 0:
return nodes
pred = args[0]
results = []
if isinstance(pred, list):
funcs = [make_query(q) for q in pred]
return __or(funcs, nodes)
elif isinstance(pred, tuple):
name, attrs = pred[0], pred[1:]
name_pred = __make_name_pred(name)
attrs_pred = __make_attrs_pred(attrs)
for n in nodes:
if name_pred(n.name) and attrs_pred(n.attrs):
results.append(n)
else:
name_pred = __make_name_pred(pred)
for n in nodes:
if name_pred(n.name):
results.append(n)
return results
if len(args) > 1:
return __compose(make_query(*args[1:]), simple_query)
return simple_query
def deep_query(query, nodes):
""" Slide the query down the branches. """
def inner(children):
results = []
for c in children:
if query([c]):
results.append(c)
results.extend(inner(c.children))
return results
return inner(nodes)
def unique(roots):
seen = set()
results = []
for r in roots:
if r not in seen:
seen.add(r)
results.append(r)
return results
def compiled_query(nodes):
"""
This is the compiled query that can be run against a configuration.
"""
query = make_query(*queries)
roots = kwargs.get("roots", True)
if kwargs.get("deep", False):
results = deep_query(query, nodes)
if roots:
results = unique([r.root for r in results])
elif roots:
results = unique([n.root for n in query(nodes)])
else:
results = query(nodes)
one = kwargs.get("one")
if one is None:
return SearchResult(children=results)
return results[one] if results else None
return compiled_query | [
"def",
"select",
"(",
"*",
"queries",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"make_query",
"(",
"*",
"args",
")",
":",
"def",
"simple_query",
"(",
"nodes",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"return",
"nodes",
"pred",
"="... | Builds a function that will execute the specified queries against a list of
Nodes. | [
"Builds",
"a",
"function",
"that",
"will",
"execute",
"the",
"specified",
"queries",
"against",
"a",
"list",
"of",
"Nodes",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L722-L793 |
239,738 | RedHatInsights/insights-core | insights/configtree/__init__.py | Node.find | def find(self, *queries, **kwargs):
"""
Finds the first result found anywhere in the configuration. Pass
`one=last` for the last result. Returns `None` if no results are found.
"""
kwargs["deep"] = True
kwargs["roots"] = False
if "one" not in kwargs:
kwargs["one"] = first
return self.select(*queries, **kwargs) | python | def find(self, *queries, **kwargs):
kwargs["deep"] = True
kwargs["roots"] = False
if "one" not in kwargs:
kwargs["one"] = first
return self.select(*queries, **kwargs) | [
"def",
"find",
"(",
"self",
",",
"*",
"queries",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"deep\"",
"]",
"=",
"True",
"kwargs",
"[",
"\"roots\"",
"]",
"=",
"False",
"if",
"\"one\"",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"\"one\"",
"... | Finds the first result found anywhere in the configuration. Pass
`one=last` for the last result. Returns `None` if no results are found. | [
"Finds",
"the",
"first",
"result",
"found",
"anywhere",
"in",
"the",
"configuration",
".",
"Pass",
"one",
"=",
"last",
"for",
"the",
"last",
"result",
".",
"Returns",
"None",
"if",
"no",
"results",
"are",
"found",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L165-L174 |
239,739 | RedHatInsights/insights-core | insights/configtree/__init__.py | Node.find_all | def find_all(self, *queries):
"""
Find all results matching the query anywhere in the configuration.
Returns an empty `SearchResult` if no results are found.
"""
return self.select(*queries, deep=True, roots=False) | python | def find_all(self, *queries):
return self.select(*queries, deep=True, roots=False) | [
"def",
"find_all",
"(",
"self",
",",
"*",
"queries",
")",
":",
"return",
"self",
".",
"select",
"(",
"*",
"queries",
",",
"deep",
"=",
"True",
",",
"roots",
"=",
"False",
")"
] | Find all results matching the query anywhere in the configuration.
Returns an empty `SearchResult` if no results are found. | [
"Find",
"all",
"results",
"matching",
"the",
"query",
"anywhere",
"in",
"the",
"configuration",
".",
"Returns",
"an",
"empty",
"SearchResult",
"if",
"no",
"results",
"are",
"found",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/__init__.py#L176-L181 |
239,740 | RedHatInsights/insights-core | insights/parsers/installed_rpms.py | pad_version | def pad_version(left, right):
"""Returns two sequences of the same length so that they can be compared.
The shorter of the two arguments is lengthened by inserting extra zeros
before non-integer components. The algorithm attempts to align character
components."""
pair = vcmp(left), vcmp(right)
mn, mx = min(pair, key=len), max(pair, key=len)
for idx, c in enumerate(mx):
try:
a = mx[idx]
b = mn[idx]
if type(a) != type(b):
mn.insert(idx, 0)
except IndexError:
if type(c) is int:
mn.append(0)
elif isinstance(c, six.string_types):
mn.append('')
else:
raise Exception("pad_version failed (%s) (%s)" % (left, right))
return pair | python | def pad_version(left, right):
pair = vcmp(left), vcmp(right)
mn, mx = min(pair, key=len), max(pair, key=len)
for idx, c in enumerate(mx):
try:
a = mx[idx]
b = mn[idx]
if type(a) != type(b):
mn.insert(idx, 0)
except IndexError:
if type(c) is int:
mn.append(0)
elif isinstance(c, six.string_types):
mn.append('')
else:
raise Exception("pad_version failed (%s) (%s)" % (left, right))
return pair | [
"def",
"pad_version",
"(",
"left",
",",
"right",
")",
":",
"pair",
"=",
"vcmp",
"(",
"left",
")",
",",
"vcmp",
"(",
"right",
")",
"mn",
",",
"mx",
"=",
"min",
"(",
"pair",
",",
"key",
"=",
"len",
")",
",",
"max",
"(",
"pair",
",",
"key",
"=",... | Returns two sequences of the same length so that they can be compared.
The shorter of the two arguments is lengthened by inserting extra zeros
before non-integer components. The algorithm attempts to align character
components. | [
"Returns",
"two",
"sequences",
"of",
"the",
"same",
"length",
"so",
"that",
"they",
"can",
"be",
"compared",
".",
"The",
"shorter",
"of",
"the",
"two",
"arguments",
"is",
"lengthened",
"by",
"inserting",
"extra",
"zeros",
"before",
"non",
"-",
"integer",
"... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/installed_rpms.py#L254-L278 |
239,741 | RedHatInsights/insights-core | insights/parsers/installed_rpms.py | InstalledRpm._parse_package | def _parse_package(cls, package_string):
"""
Helper method for parsing package string.
Args:
package_string (str): dash separated package string such as 'bash-4.2.39-3.el7'
Returns:
dict: dictionary containing 'name', 'version', 'release' and 'arch' keys
"""
pkg, arch = rsplit(package_string, cls._arch_sep(package_string))
if arch not in KNOWN_ARCHITECTURES:
pkg, arch = (package_string, None)
pkg, release = rsplit(pkg, '-')
name, version = rsplit(pkg, '-')
epoch, version = version.split(':', 1) if ":" in version else ['0', version]
# oracleasm packages have a dash in their version string, fix that
if name.startswith('oracleasm') and name.endswith('.el5'):
name, version2 = name.split('-', 1)
version = version2 + '-' + version
return {
'name': name,
'version': version,
'release': release,
'arch': arch,
'epoch': epoch
} | python | def _parse_package(cls, package_string):
pkg, arch = rsplit(package_string, cls._arch_sep(package_string))
if arch not in KNOWN_ARCHITECTURES:
pkg, arch = (package_string, None)
pkg, release = rsplit(pkg, '-')
name, version = rsplit(pkg, '-')
epoch, version = version.split(':', 1) if ":" in version else ['0', version]
# oracleasm packages have a dash in their version string, fix that
if name.startswith('oracleasm') and name.endswith('.el5'):
name, version2 = name.split('-', 1)
version = version2 + '-' + version
return {
'name': name,
'version': version,
'release': release,
'arch': arch,
'epoch': epoch
} | [
"def",
"_parse_package",
"(",
"cls",
",",
"package_string",
")",
":",
"pkg",
",",
"arch",
"=",
"rsplit",
"(",
"package_string",
",",
"cls",
".",
"_arch_sep",
"(",
"package_string",
")",
")",
"if",
"arch",
"not",
"in",
"KNOWN_ARCHITECTURES",
":",
"pkg",
","... | Helper method for parsing package string.
Args:
package_string (str): dash separated package string such as 'bash-4.2.39-3.el7'
Returns:
dict: dictionary containing 'name', 'version', 'release' and 'arch' keys | [
"Helper",
"method",
"for",
"parsing",
"package",
"string",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/installed_rpms.py#L431-L457 |
239,742 | RedHatInsights/insights-core | insights/parsers/installed_rpms.py | InstalledRpm._parse_line | def _parse_line(cls, line):
"""
Helper method for parsing package line with or without SOS report information.
Args:
line (str): package line with or without SOS report information
Returns:
dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus
additionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig',
'pgpsig_short' if these are present.
"""
try:
pkg, rest = line.split(None, 1)
except ValueError:
rpm = cls._parse_package(line.strip())
return rpm
rpm = cls._parse_package(pkg)
rest = rest.split('\t')
for i, value in enumerate(rest):
rpm[cls.SOSREPORT_KEYS[i]] = value
return rpm | python | def _parse_line(cls, line):
try:
pkg, rest = line.split(None, 1)
except ValueError:
rpm = cls._parse_package(line.strip())
return rpm
rpm = cls._parse_package(pkg)
rest = rest.split('\t')
for i, value in enumerate(rest):
rpm[cls.SOSREPORT_KEYS[i]] = value
return rpm | [
"def",
"_parse_line",
"(",
"cls",
",",
"line",
")",
":",
"try",
":",
"pkg",
",",
"rest",
"=",
"line",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"rpm",
"=",
"cls",
".",
"_parse_package",
"(",
"line",
".",
"strip",
"(",
... | Helper method for parsing package line with or without SOS report information.
Args:
line (str): package line with or without SOS report information
Returns:
dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus
additionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig',
'pgpsig_short' if these are present. | [
"Helper",
"method",
"for",
"parsing",
"package",
"line",
"with",
"or",
"without",
"SOS",
"report",
"information",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/installed_rpms.py#L460-L481 |
239,743 | RedHatInsights/insights-core | insights/contrib/importlib.py | _resolve_name | def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name) | python | def _resolve_name(name, package, level):
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name) | [
"def",
"_resolve_name",
"(",
"name",
",",
"package",
",",
"level",
")",
":",
"if",
"not",
"hasattr",
"(",
"package",
",",
"'rindex'",
")",
":",
"raise",
"ValueError",
"(",
"\"'package' not set to a string\"",
")",
"dot",
"=",
"len",
"(",
"package",
")",
"f... | Return the absolute name of the module to be imported. | [
"Return",
"the",
"absolute",
"name",
"of",
"the",
"module",
"to",
"be",
"imported",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/importlib.py#L6-L17 |
239,744 | RedHatInsights/insights-core | insights/parsers/krb5.py | _handle_key_value | def _handle_key_value(t_dict, key, value):
"""
Function to handle key has multi value, and return the values as list.
"""
if key in t_dict:
val = t_dict[key]
if isinstance(val, str):
val = [val]
val.append(value)
return val
return value | python | def _handle_key_value(t_dict, key, value):
if key in t_dict:
val = t_dict[key]
if isinstance(val, str):
val = [val]
val.append(value)
return val
return value | [
"def",
"_handle_key_value",
"(",
"t_dict",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"in",
"t_dict",
":",
"val",
"=",
"t_dict",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"val",
",",
"str",
")",
":",
"val",
"=",
"[",
"val",
"]",
"val",
".",... | Function to handle key has multi value, and return the values as list. | [
"Function",
"to",
"handle",
"key",
"has",
"multi",
"value",
"and",
"return",
"the",
"values",
"as",
"list",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/krb5.py#L63-L73 |
239,745 | RedHatInsights/insights-core | insights/formats/__init__.py | get_formatter | def get_formatter(name):
"""
Looks up a formatter class given a prefix to it.
The names are sorted, and the first matching class is returned.
"""
for k in sorted(_FORMATTERS):
if k.startswith(name):
return _FORMATTERS[k] | python | def get_formatter(name):
for k in sorted(_FORMATTERS):
if k.startswith(name):
return _FORMATTERS[k] | [
"def",
"get_formatter",
"(",
"name",
")",
":",
"for",
"k",
"in",
"sorted",
"(",
"_FORMATTERS",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"name",
")",
":",
"return",
"_FORMATTERS",
"[",
"k",
"]"
] | Looks up a formatter class given a prefix to it.
The names are sorted, and the first matching class is returned. | [
"Looks",
"up",
"a",
"formatter",
"class",
"given",
"a",
"prefix",
"to",
"it",
".",
"The",
"names",
"are",
"sorted",
"and",
"the",
"first",
"matching",
"class",
"is",
"returned",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/__init__.py#L10-L17 |
239,746 | RedHatInsights/insights-core | insights/parsers/nfs_exports.py | NFSExportsBase.all_options | def all_options(self):
"""Returns the set of all options used in all export entries"""
items = chain.from_iterable(hosts.values() for hosts in self.data.values())
return set(chain.from_iterable(items)) | python | def all_options(self):
items = chain.from_iterable(hosts.values() for hosts in self.data.values())
return set(chain.from_iterable(items)) | [
"def",
"all_options",
"(",
"self",
")",
":",
"items",
"=",
"chain",
".",
"from_iterable",
"(",
"hosts",
".",
"values",
"(",
")",
"for",
"hosts",
"in",
"self",
".",
"data",
".",
"values",
"(",
")",
")",
"return",
"set",
"(",
"chain",
".",
"from_iterab... | Returns the set of all options used in all export entries | [
"Returns",
"the",
"set",
"of",
"all",
"options",
"used",
"in",
"all",
"export",
"entries"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/nfs_exports.py#L133-L136 |
239,747 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection._init_session | def _init_session(self):
"""
Set up the session, auth is handled here
"""
session = requests.Session()
session.headers = {'User-Agent': self.user_agent,
'Accept': 'application/json'}
if self.systemid is not None:
session.headers.update({'systemid': self.systemid})
if self.authmethod == "BASIC":
session.auth = (self.username, self.password)
elif self.authmethod == "CERT":
cert = rhsmCertificate.certpath()
key = rhsmCertificate.keypath()
if rhsmCertificate.exists():
session.cert = (cert, key)
else:
logger.error('ERROR: Certificates not found.')
session.verify = self.cert_verify
session.proxies = self.proxies
session.trust_env = False
if self.proxy_auth:
# HACKY
try:
# Need to make a request that will fail to get proxies set up
net_logger.info("GET %s", self.base_url)
session.request(
"GET", self.base_url, timeout=self.config.http_timeout)
except requests.ConnectionError:
pass
# Major hack, requests/urllib3 does not make access to
# proxy_headers easy
proxy_mgr = session.adapters['https://'].proxy_manager[self.proxies['https']]
auth_map = {'Proxy-Authorization': self.proxy_auth}
proxy_mgr.proxy_headers = auth_map
proxy_mgr.connection_pool_kw['_proxy_headers'] = auth_map
conns = proxy_mgr.pools._container
for conn in conns:
connection = conns[conn]
connection.proxy_headers = auth_map
return session | python | def _init_session(self):
session = requests.Session()
session.headers = {'User-Agent': self.user_agent,
'Accept': 'application/json'}
if self.systemid is not None:
session.headers.update({'systemid': self.systemid})
if self.authmethod == "BASIC":
session.auth = (self.username, self.password)
elif self.authmethod == "CERT":
cert = rhsmCertificate.certpath()
key = rhsmCertificate.keypath()
if rhsmCertificate.exists():
session.cert = (cert, key)
else:
logger.error('ERROR: Certificates not found.')
session.verify = self.cert_verify
session.proxies = self.proxies
session.trust_env = False
if self.proxy_auth:
# HACKY
try:
# Need to make a request that will fail to get proxies set up
net_logger.info("GET %s", self.base_url)
session.request(
"GET", self.base_url, timeout=self.config.http_timeout)
except requests.ConnectionError:
pass
# Major hack, requests/urllib3 does not make access to
# proxy_headers easy
proxy_mgr = session.adapters['https://'].proxy_manager[self.proxies['https']]
auth_map = {'Proxy-Authorization': self.proxy_auth}
proxy_mgr.proxy_headers = auth_map
proxy_mgr.connection_pool_kw['_proxy_headers'] = auth_map
conns = proxy_mgr.pools._container
for conn in conns:
connection = conns[conn]
connection.proxy_headers = auth_map
return session | [
"def",
"_init_session",
"(",
"self",
")",
":",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"session",
".",
"headers",
"=",
"{",
"'User-Agent'",
":",
"self",
".",
"user_agent",
",",
"'Accept'",
":",
"'application/json'",
"}",
"if",
"self",
".",
... | Set up the session, auth is handled here | [
"Set",
"up",
"the",
"session",
"auth",
"is",
"handled",
"here"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L129-L169 |
239,748 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.handle_fail_rcs | def handle_fail_rcs(self, req):
"""
Bail out if we get a 401 and leave a message
"""
try:
logger.debug("HTTP Status Code: %s", req.status_code)
logger.debug("HTTP Response Text: %s", req.text)
logger.debug("HTTP Response Reason: %s", req.reason)
logger.debug("HTTP Response Content: %s", req.content)
except:
logger.error("Malformed HTTP Request.")
# attempt to read the HTTP response JSON message
try:
logger.debug("HTTP Response Message: %s", req.json()["message"])
except:
logger.debug("No HTTP Response message present.")
# handle specific status codes
if req.status_code >= 400:
logger.info("Debug Information:\nHTTP Status Code: %s",
req.status_code)
logger.info("HTTP Status Text: %s", req.reason)
if req.status_code == 401:
logger.error("Authorization Required.")
logger.error("Please ensure correct credentials "
"in " + constants.default_conf_file)
logger.debug("HTTP Response Text: %s", req.text)
if req.status_code == 402:
# failed registration because of entitlement limit hit
logger.debug('Registration failed by 402 error.')
try:
logger.error(req.json()["message"])
except LookupError:
logger.error("Got 402 but no message")
logger.debug("HTTP Response Text: %s", req.text)
except:
logger.error("Got 402 but no message")
logger.debug("HTTP Response Text: %s", req.text)
if req.status_code == 403 and self.auto_config:
# Insights disabled in satellite
rhsm_hostname = urlparse(self.base_url).hostname
if (rhsm_hostname != 'subscription.rhn.redhat.com' and
rhsm_hostname != 'subscription.rhsm.redhat.com'):
logger.error('Please enable Insights on Satellite server '
'%s to continue.', rhsm_hostname)
if req.status_code == 412:
try:
unreg_date = req.json()["unregistered_at"]
logger.error(req.json()["message"])
write_unregistered_file(unreg_date)
except LookupError:
unreg_date = "412, but no unreg_date or message"
logger.debug("HTTP Response Text: %s", req.text)
except:
unreg_date = "412, but no unreg_date or message"
logger.debug("HTTP Response Text: %s", req.text)
return True
return False | python | def handle_fail_rcs(self, req):
try:
logger.debug("HTTP Status Code: %s", req.status_code)
logger.debug("HTTP Response Text: %s", req.text)
logger.debug("HTTP Response Reason: %s", req.reason)
logger.debug("HTTP Response Content: %s", req.content)
except:
logger.error("Malformed HTTP Request.")
# attempt to read the HTTP response JSON message
try:
logger.debug("HTTP Response Message: %s", req.json()["message"])
except:
logger.debug("No HTTP Response message present.")
# handle specific status codes
if req.status_code >= 400:
logger.info("Debug Information:\nHTTP Status Code: %s",
req.status_code)
logger.info("HTTP Status Text: %s", req.reason)
if req.status_code == 401:
logger.error("Authorization Required.")
logger.error("Please ensure correct credentials "
"in " + constants.default_conf_file)
logger.debug("HTTP Response Text: %s", req.text)
if req.status_code == 402:
# failed registration because of entitlement limit hit
logger.debug('Registration failed by 402 error.')
try:
logger.error(req.json()["message"])
except LookupError:
logger.error("Got 402 but no message")
logger.debug("HTTP Response Text: %s", req.text)
except:
logger.error("Got 402 but no message")
logger.debug("HTTP Response Text: %s", req.text)
if req.status_code == 403 and self.auto_config:
# Insights disabled in satellite
rhsm_hostname = urlparse(self.base_url).hostname
if (rhsm_hostname != 'subscription.rhn.redhat.com' and
rhsm_hostname != 'subscription.rhsm.redhat.com'):
logger.error('Please enable Insights on Satellite server '
'%s to continue.', rhsm_hostname)
if req.status_code == 412:
try:
unreg_date = req.json()["unregistered_at"]
logger.error(req.json()["message"])
write_unregistered_file(unreg_date)
except LookupError:
unreg_date = "412, but no unreg_date or message"
logger.debug("HTTP Response Text: %s", req.text)
except:
unreg_date = "412, but no unreg_date or message"
logger.debug("HTTP Response Text: %s", req.text)
return True
return False | [
"def",
"handle_fail_rcs",
"(",
"self",
",",
"req",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"HTTP Status Code: %s\"",
",",
"req",
".",
"status_code",
")",
"logger",
".",
"debug",
"(",
"\"HTTP Response Text: %s\"",
",",
"req",
".",
"text",
")",
... | Bail out if we get a 401 and leave a message | [
"Bail",
"out",
"if",
"we",
"get",
"a",
"401",
"and",
"leave",
"a",
"message"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L352-L411 |
239,749 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.get_satellite5_info | def get_satellite5_info(self, branch_info):
"""
Get remote_leaf for Satellite 5 Managed box
"""
logger.debug(
"Remote branch not -1 but remote leaf is -1, must be Satellite 5")
if os.path.isfile('/etc/sysconfig/rhn/systemid'):
logger.debug("Found systemid file")
sat5_conf = ET.parse('/etc/sysconfig/rhn/systemid').getroot()
leaf_id = None
for member in sat5_conf.getiterator('member'):
if member.find('name').text == 'system_id':
logger.debug("Found member 'system_id'")
leaf_id = member.find('value').find(
'string').text.split('ID-')[1]
logger.debug("Found leaf id: %s", leaf_id)
branch_info['remote_leaf'] = leaf_id
if leaf_id is None:
logger.error("Could not determine leaf_id! Exiting!")
return False | python | def get_satellite5_info(self, branch_info):
logger.debug(
"Remote branch not -1 but remote leaf is -1, must be Satellite 5")
if os.path.isfile('/etc/sysconfig/rhn/systemid'):
logger.debug("Found systemid file")
sat5_conf = ET.parse('/etc/sysconfig/rhn/systemid').getroot()
leaf_id = None
for member in sat5_conf.getiterator('member'):
if member.find('name').text == 'system_id':
logger.debug("Found member 'system_id'")
leaf_id = member.find('value').find(
'string').text.split('ID-')[1]
logger.debug("Found leaf id: %s", leaf_id)
branch_info['remote_leaf'] = leaf_id
if leaf_id is None:
logger.error("Could not determine leaf_id! Exiting!")
return False | [
"def",
"get_satellite5_info",
"(",
"self",
",",
"branch_info",
")",
":",
"logger",
".",
"debug",
"(",
"\"Remote branch not -1 but remote leaf is -1, must be Satellite 5\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"'/etc/sysconfig/rhn/systemid'",
")",
":",
"l... | Get remote_leaf for Satellite 5 Managed box | [
"Get",
"remote_leaf",
"for",
"Satellite",
"5",
"Managed",
"box"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L413-L432 |
239,750 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.get_branch_info | def get_branch_info(self):
"""
Retrieve branch_info from Satellite Server
"""
branch_info = None
if os.path.exists(constants.cached_branch_info):
# use cached branch info file if less than 10 minutes old
# (failsafe, should be deleted at end of client run normally)
logger.debug(u'Reading branch info from cached file.')
ctime = datetime.utcfromtimestamp(
os.path.getctime(constants.cached_branch_info))
if datetime.utcnow() < (ctime + timedelta(minutes=5)):
with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f:
branch_info = json.load(f)
return branch_info
else:
logger.debug(u'Cached branch info is older than 5 minutes.')
logger.debug(u'Obtaining branch information from %s',
self.branch_info_url)
net_logger.info(u'GET %s', self.branch_info_url)
response = self.session.get(self.branch_info_url,
timeout=self.config.http_timeout)
logger.debug(u'GET branch_info status: %s', response.status_code)
if response.status_code != 200:
logger.debug("There was an error obtaining branch information.")
logger.debug(u'Bad status from server: %s', response.status_code)
logger.debug("Assuming default branch information %s" % self.branch_info)
return False
branch_info = response.json()
logger.debug(u'Branch information: %s', json.dumps(branch_info))
# Determine if we are connected to Satellite 5
if ((branch_info[u'remote_branch'] is not -1 and
branch_info[u'remote_leaf'] is -1)):
self.get_satellite5_info(branch_info)
logger.debug(u'Saving branch info to file.')
with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f:
# json.dump is broke in py2 so use dumps
bi_str = json.dumps(branch_info, ensure_ascii=False)
f.write(bi_str)
self.branch_info = branch_info
return branch_info | python | def get_branch_info(self):
branch_info = None
if os.path.exists(constants.cached_branch_info):
# use cached branch info file if less than 10 minutes old
# (failsafe, should be deleted at end of client run normally)
logger.debug(u'Reading branch info from cached file.')
ctime = datetime.utcfromtimestamp(
os.path.getctime(constants.cached_branch_info))
if datetime.utcnow() < (ctime + timedelta(minutes=5)):
with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f:
branch_info = json.load(f)
return branch_info
else:
logger.debug(u'Cached branch info is older than 5 minutes.')
logger.debug(u'Obtaining branch information from %s',
self.branch_info_url)
net_logger.info(u'GET %s', self.branch_info_url)
response = self.session.get(self.branch_info_url,
timeout=self.config.http_timeout)
logger.debug(u'GET branch_info status: %s', response.status_code)
if response.status_code != 200:
logger.debug("There was an error obtaining branch information.")
logger.debug(u'Bad status from server: %s', response.status_code)
logger.debug("Assuming default branch information %s" % self.branch_info)
return False
branch_info = response.json()
logger.debug(u'Branch information: %s', json.dumps(branch_info))
# Determine if we are connected to Satellite 5
if ((branch_info[u'remote_branch'] is not -1 and
branch_info[u'remote_leaf'] is -1)):
self.get_satellite5_info(branch_info)
logger.debug(u'Saving branch info to file.')
with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f:
# json.dump is broke in py2 so use dumps
bi_str = json.dumps(branch_info, ensure_ascii=False)
f.write(bi_str)
self.branch_info = branch_info
return branch_info | [
"def",
"get_branch_info",
"(",
"self",
")",
":",
"branch_info",
"=",
"None",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"constants",
".",
"cached_branch_info",
")",
":",
"# use cached branch info file if less than 10 minutes old",
"# (failsafe, should be deleted at end... | Retrieve branch_info from Satellite Server | [
"Retrieve",
"branch_info",
"from",
"Satellite",
"Server"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L434-L478 |
239,751 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.create_system | def create_system(self, new_machine_id=False):
"""
Create the machine via the API
"""
client_hostname = determine_hostname()
machine_id = generate_machine_id(new_machine_id)
branch_info = self.branch_info
if not branch_info:
return False
remote_branch = branch_info['remote_branch']
remote_leaf = branch_info['remote_leaf']
data = {'machine_id': machine_id,
'remote_branch': remote_branch,
'remote_leaf': remote_leaf,
'hostname': client_hostname}
if self.config.display_name is not None:
data['display_name'] = self.config.display_name
data = json.dumps(data)
post_system_url = self.api_url + '/v1/systems'
logger.debug("POST System: %s", post_system_url)
logger.debug(data)
net_logger.info("POST %s", post_system_url)
return self.session.post(post_system_url,
headers={'Content-Type': 'application/json'},
data=data) | python | def create_system(self, new_machine_id=False):
client_hostname = determine_hostname()
machine_id = generate_machine_id(new_machine_id)
branch_info = self.branch_info
if not branch_info:
return False
remote_branch = branch_info['remote_branch']
remote_leaf = branch_info['remote_leaf']
data = {'machine_id': machine_id,
'remote_branch': remote_branch,
'remote_leaf': remote_leaf,
'hostname': client_hostname}
if self.config.display_name is not None:
data['display_name'] = self.config.display_name
data = json.dumps(data)
post_system_url = self.api_url + '/v1/systems'
logger.debug("POST System: %s", post_system_url)
logger.debug(data)
net_logger.info("POST %s", post_system_url)
return self.session.post(post_system_url,
headers={'Content-Type': 'application/json'},
data=data) | [
"def",
"create_system",
"(",
"self",
",",
"new_machine_id",
"=",
"False",
")",
":",
"client_hostname",
"=",
"determine_hostname",
"(",
")",
"machine_id",
"=",
"generate_machine_id",
"(",
"new_machine_id",
")",
"branch_info",
"=",
"self",
".",
"branch_info",
"if",
... | Create the machine via the API | [
"Create",
"the",
"machine",
"via",
"the",
"API"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L481-L508 |
239,752 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.group_systems | def group_systems(self, group_name, systems):
"""
Adds an array of systems to specified group
Args:
group_name: Display name of group
systems: Array of {'machine_id': machine_id}
"""
api_group_id = None
headers = {'Content-Type': 'application/json'}
group_path = self.api_url + '/v1/groups'
group_get_path = group_path + ('?display_name=%s' % quote(group_name))
logger.debug("GET group: %s", group_get_path)
net_logger.info("GET %s", group_get_path)
get_group = self.session.get(group_get_path)
logger.debug("GET group status: %s", get_group.status_code)
if get_group.status_code == 200:
api_group_id = get_group.json()['id']
if get_group.status_code == 404:
# Group does not exist, POST to create
logger.debug("POST group")
data = json.dumps({'display_name': group_name})
net_logger.info("POST", group_path)
post_group = self.session.post(group_path,
headers=headers,
data=data)
logger.debug("POST group status: %s", post_group.status_code)
logger.debug("POST Group: %s", post_group.json())
self.handle_fail_rcs(post_group)
api_group_id = post_group.json()['id']
logger.debug("PUT group")
data = json.dumps(systems)
net_logger.info("PUT %s", group_path + ('/%s/systems' % api_group_id))
put_group = self.session.put(group_path +
('/%s/systems' % api_group_id),
headers=headers,
data=data)
logger.debug("PUT group status: %d", put_group.status_code)
logger.debug("PUT Group: %s", put_group.json()) | python | def group_systems(self, group_name, systems):
api_group_id = None
headers = {'Content-Type': 'application/json'}
group_path = self.api_url + '/v1/groups'
group_get_path = group_path + ('?display_name=%s' % quote(group_name))
logger.debug("GET group: %s", group_get_path)
net_logger.info("GET %s", group_get_path)
get_group = self.session.get(group_get_path)
logger.debug("GET group status: %s", get_group.status_code)
if get_group.status_code == 200:
api_group_id = get_group.json()['id']
if get_group.status_code == 404:
# Group does not exist, POST to create
logger.debug("POST group")
data = json.dumps({'display_name': group_name})
net_logger.info("POST", group_path)
post_group = self.session.post(group_path,
headers=headers,
data=data)
logger.debug("POST group status: %s", post_group.status_code)
logger.debug("POST Group: %s", post_group.json())
self.handle_fail_rcs(post_group)
api_group_id = post_group.json()['id']
logger.debug("PUT group")
data = json.dumps(systems)
net_logger.info("PUT %s", group_path + ('/%s/systems' % api_group_id))
put_group = self.session.put(group_path +
('/%s/systems' % api_group_id),
headers=headers,
data=data)
logger.debug("PUT group status: %d", put_group.status_code)
logger.debug("PUT Group: %s", put_group.json()) | [
"def",
"group_systems",
"(",
"self",
",",
"group_name",
",",
"systems",
")",
":",
"api_group_id",
"=",
"None",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
"group_path",
"=",
"self",
".",
"api_url",
"+",
"'/v1/groups'",
"group_get_path"... | Adds an array of systems to specified group
Args:
group_name: Display name of group
systems: Array of {'machine_id': machine_id} | [
"Adds",
"an",
"array",
"of",
"systems",
"to",
"specified",
"group"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L511-L552 |
239,753 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.do_group | def do_group(self):
"""
Do grouping on register
"""
group_id = self.config.group
systems = {'machine_id': generate_machine_id()}
self.group_systems(group_id, systems) | python | def do_group(self):
group_id = self.config.group
systems = {'machine_id': generate_machine_id()}
self.group_systems(group_id, systems) | [
"def",
"do_group",
"(",
"self",
")",
":",
"group_id",
"=",
"self",
".",
"config",
".",
"group",
"systems",
"=",
"{",
"'machine_id'",
":",
"generate_machine_id",
"(",
")",
"}",
"self",
".",
"group_systems",
"(",
"group_id",
",",
"systems",
")"
] | Do grouping on register | [
"Do",
"grouping",
"on",
"register"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L556-L562 |
239,754 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection._legacy_api_registration_check | def _legacy_api_registration_check(self):
'''
Check registration status through API
'''
logger.debug('Checking registration status...')
machine_id = generate_machine_id()
try:
url = self.api_url + '/v1/systems/' + machine_id
net_logger.info("GET %s", url)
res = self.session.get(url, timeout=self.config.http_timeout)
except requests.ConnectionError:
# can't connect, run connection test
logger.error('Connection timed out. Running connection test...')
self.test_connection()
return False
# had to do a quick bugfix changing this around,
# which makes the None-False-True dichotomy seem weird
# TODO: reconsider what gets returned, probably this:
# True for registered
# False for unregistered
# None for system 404
try:
# check the 'unregistered_at' key of the response
unreg_status = json.loads(res.content).get('unregistered_at', 'undefined')
# set the global account number
self.config.account_number = json.loads(res.content).get('account_number', 'undefined')
except ValueError:
# bad response, no json object
return False
if unreg_status == 'undefined':
# key not found, machine not yet registered
return None
elif unreg_status is None:
# unregistered_at = null, means this machine IS registered
return True
else:
# machine has been unregistered, this is a timestamp
return unreg_status | python | def _legacy_api_registration_check(self):
'''
Check registration status through API
'''
logger.debug('Checking registration status...')
machine_id = generate_machine_id()
try:
url = self.api_url + '/v1/systems/' + machine_id
net_logger.info("GET %s", url)
res = self.session.get(url, timeout=self.config.http_timeout)
except requests.ConnectionError:
# can't connect, run connection test
logger.error('Connection timed out. Running connection test...')
self.test_connection()
return False
# had to do a quick bugfix changing this around,
# which makes the None-False-True dichotomy seem weird
# TODO: reconsider what gets returned, probably this:
# True for registered
# False for unregistered
# None for system 404
try:
# check the 'unregistered_at' key of the response
unreg_status = json.loads(res.content).get('unregistered_at', 'undefined')
# set the global account number
self.config.account_number = json.loads(res.content).get('account_number', 'undefined')
except ValueError:
# bad response, no json object
return False
if unreg_status == 'undefined':
# key not found, machine not yet registered
return None
elif unreg_status is None:
# unregistered_at = null, means this machine IS registered
return True
else:
# machine has been unregistered, this is a timestamp
return unreg_status | [
"def",
"_legacy_api_registration_check",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Checking registration status...'",
")",
"machine_id",
"=",
"generate_machine_id",
"(",
")",
"try",
":",
"url",
"=",
"self",
".",
"api_url",
"+",
"'/v1/systems/'",
"+",
... | Check registration status through API | [
"Check",
"registration",
"status",
"through",
"API"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L565-L602 |
239,755 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection._fetch_system_by_machine_id | def _fetch_system_by_machine_id(self):
'''
Get a system by machine ID
Returns
dict system exists in inventory
False system does not exist in inventory
None error connection or parsing response
'''
machine_id = generate_machine_id()
try:
url = self.api_url + '/inventory/v1/hosts?insights_id=' + machine_id
net_logger.info("GET %s", url)
res = self.session.get(url, timeout=self.config.http_timeout)
except (requests.ConnectionError, requests.Timeout) as e:
logger.error(e)
logger.error('The Insights API could not be reached.')
return None
try:
if (self.handle_fail_rcs(res)):
return None
res_json = json.loads(res.content)
except ValueError as e:
logger.error(e)
logger.error('Could not parse response body.')
return None
if res_json['total'] == 0:
logger.debug('No hosts found with machine ID: %s', machine_id)
return False
return res_json['results'] | python | def _fetch_system_by_machine_id(self):
'''
Get a system by machine ID
Returns
dict system exists in inventory
False system does not exist in inventory
None error connection or parsing response
'''
machine_id = generate_machine_id()
try:
url = self.api_url + '/inventory/v1/hosts?insights_id=' + machine_id
net_logger.info("GET %s", url)
res = self.session.get(url, timeout=self.config.http_timeout)
except (requests.ConnectionError, requests.Timeout) as e:
logger.error(e)
logger.error('The Insights API could not be reached.')
return None
try:
if (self.handle_fail_rcs(res)):
return None
res_json = json.loads(res.content)
except ValueError as e:
logger.error(e)
logger.error('Could not parse response body.')
return None
if res_json['total'] == 0:
logger.debug('No hosts found with machine ID: %s', machine_id)
return False
return res_json['results'] | [
"def",
"_fetch_system_by_machine_id",
"(",
"self",
")",
":",
"machine_id",
"=",
"generate_machine_id",
"(",
")",
"try",
":",
"url",
"=",
"self",
".",
"api_url",
"+",
"'/inventory/v1/hosts?insights_id='",
"+",
"machine_id",
"net_logger",
".",
"info",
"(",
"\"GET %s... | Get a system by machine ID
Returns
dict system exists in inventory
False system does not exist in inventory
None error connection or parsing response | [
"Get",
"a",
"system",
"by",
"machine",
"ID",
"Returns",
"dict",
"system",
"exists",
"in",
"inventory",
"False",
"system",
"does",
"not",
"exist",
"in",
"inventory",
"None",
"error",
"connection",
"or",
"parsing",
"response"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L604-L632 |
239,756 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.api_registration_check | def api_registration_check(self):
'''
Reach out to the inventory API to check
whether a machine exists.
Returns
True system exists in inventory
False system does not exist in inventory
None error connection or parsing response
'''
if self.config.legacy_upload:
return self._legacy_api_registration_check()
logger.debug('Checking registration status...')
results = self._fetch_system_by_machine_id()
if not results:
return results
logger.debug('System found.')
logger.debug('Machine ID: %s', results[0]['insights_id'])
logger.debug('Inventory ID: %s', results[0]['id'])
return True | python | def api_registration_check(self):
'''
Reach out to the inventory API to check
whether a machine exists.
Returns
True system exists in inventory
False system does not exist in inventory
None error connection or parsing response
'''
if self.config.legacy_upload:
return self._legacy_api_registration_check()
logger.debug('Checking registration status...')
results = self._fetch_system_by_machine_id()
if not results:
return results
logger.debug('System found.')
logger.debug('Machine ID: %s', results[0]['insights_id'])
logger.debug('Inventory ID: %s', results[0]['id'])
return True | [
"def",
"api_registration_check",
"(",
"self",
")",
":",
"if",
"self",
".",
"config",
".",
"legacy_upload",
":",
"return",
"self",
".",
"_legacy_api_registration_check",
"(",
")",
"logger",
".",
"debug",
"(",
"'Checking registration status...'",
")",
"results",
"="... | Reach out to the inventory API to check
whether a machine exists.
Returns
True system exists in inventory
False system does not exist in inventory
None error connection or parsing response | [
"Reach",
"out",
"to",
"the",
"inventory",
"API",
"to",
"check",
"whether",
"a",
"machine",
"exists",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L634-L655 |
239,757 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.unregister | def unregister(self):
"""
Unregister this system from the insights service
"""
machine_id = generate_machine_id()
try:
logger.debug("Unregistering %s", machine_id)
url = self.api_url + "/v1/systems/" + machine_id
net_logger.info("DELETE %s", url)
self.session.delete(url)
logger.info(
"Successfully unregistered from the Red Hat Insights Service")
return True
except requests.ConnectionError as e:
logger.debug(e)
logger.error("Could not unregister this system")
return False | python | def unregister(self):
machine_id = generate_machine_id()
try:
logger.debug("Unregistering %s", machine_id)
url = self.api_url + "/v1/systems/" + machine_id
net_logger.info("DELETE %s", url)
self.session.delete(url)
logger.info(
"Successfully unregistered from the Red Hat Insights Service")
return True
except requests.ConnectionError as e:
logger.debug(e)
logger.error("Could not unregister this system")
return False | [
"def",
"unregister",
"(",
"self",
")",
":",
"machine_id",
"=",
"generate_machine_id",
"(",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Unregistering %s\"",
",",
"machine_id",
")",
"url",
"=",
"self",
".",
"api_url",
"+",
"\"/v1/systems/\"",
"+",
"machin... | Unregister this system from the insights service | [
"Unregister",
"this",
"system",
"from",
"the",
"insights",
"service"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L658-L674 |
239,758 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.register | def register(self):
"""
Register this machine
"""
client_hostname = determine_hostname()
# This will undo a blacklist
logger.debug("API: Create system")
system = self.create_system(new_machine_id=False)
if system is False:
return ('Could not reach the Insights service to register.', '', '', '')
# If we get a 409, we know we need to generate a new machine-id
if system.status_code == 409:
system = self.create_system(new_machine_id=True)
self.handle_fail_rcs(system)
logger.debug("System: %s", system.json())
message = system.headers.get("x-rh-message", "")
# Do grouping
if self.config.group is not None:
self.do_group()
# Display registration success messasge to STDOUT and logs
if system.status_code == 201:
try:
system_json = system.json()
machine_id = system_json["machine_id"]
account_number = system_json["account_number"]
logger.info("You successfully registered %s to account %s." % (machine_id, account_number))
except:
logger.debug('Received invalid JSON on system registration.')
logger.debug('API still indicates valid registration with 201 status code.')
logger.debug(system)
logger.debug(system.json())
if self.config.group is not None:
return (message, client_hostname, self.config.group, self.config.display_name)
elif self.config.display_name is not None:
return (message, client_hostname, "None", self.config.display_name)
else:
return (message, client_hostname, "None", "") | python | def register(self):
client_hostname = determine_hostname()
# This will undo a blacklist
logger.debug("API: Create system")
system = self.create_system(new_machine_id=False)
if system is False:
return ('Could not reach the Insights service to register.', '', '', '')
# If we get a 409, we know we need to generate a new machine-id
if system.status_code == 409:
system = self.create_system(new_machine_id=True)
self.handle_fail_rcs(system)
logger.debug("System: %s", system.json())
message = system.headers.get("x-rh-message", "")
# Do grouping
if self.config.group is not None:
self.do_group()
# Display registration success messasge to STDOUT and logs
if system.status_code == 201:
try:
system_json = system.json()
machine_id = system_json["machine_id"]
account_number = system_json["account_number"]
logger.info("You successfully registered %s to account %s." % (machine_id, account_number))
except:
logger.debug('Received invalid JSON on system registration.')
logger.debug('API still indicates valid registration with 201 status code.')
logger.debug(system)
logger.debug(system.json())
if self.config.group is not None:
return (message, client_hostname, self.config.group, self.config.display_name)
elif self.config.display_name is not None:
return (message, client_hostname, "None", self.config.display_name)
else:
return (message, client_hostname, "None", "") | [
"def",
"register",
"(",
"self",
")",
":",
"client_hostname",
"=",
"determine_hostname",
"(",
")",
"# This will undo a blacklist",
"logger",
".",
"debug",
"(",
"\"API: Create system\"",
")",
"system",
"=",
"self",
".",
"create_system",
"(",
"new_machine_id",
"=",
"... | Register this machine | [
"Register",
"this",
"machine"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L677-L719 |
239,759 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection._legacy_upload_archive | def _legacy_upload_archive(self, data_collected, duration):
'''
Do an HTTPS upload of the archive
'''
file_name = os.path.basename(data_collected)
try:
from insights.contrib import magic
m = magic.open(magic.MAGIC_MIME)
m.load()
mime_type = m.file(data_collected)
except ImportError:
magic = None
logger.debug('python-magic not installed, using backup function...')
from .utilities import magic_plan_b
mime_type = magic_plan_b(data_collected)
files = {
'file': (file_name, open(data_collected, 'rb'), mime_type)}
if self.config.analyze_container:
logger.debug('Uploading container, image, mountpoint or tarfile.')
upload_url = self.upload_url
else:
logger.debug('Uploading a host.')
upload_url = self.upload_url + '/' + generate_machine_id()
logger.debug("Uploading %s to %s", data_collected, upload_url)
headers = {'x-rh-collection-time': str(duration)}
net_logger.info("POST %s", upload_url)
upload = self.session.post(upload_url, files=files, headers=headers)
logger.debug("Upload status: %s %s %s",
upload.status_code, upload.reason, upload.text)
if upload.status_code in (200, 201):
the_json = json.loads(upload.text)
else:
logger.error("Upload archive failed with status code %s", upload.status_code)
return upload
try:
self.config.account_number = the_json["upload"]["account_number"]
except:
self.config.account_number = None
logger.debug("Upload duration: %s", upload.elapsed)
return upload | python | def _legacy_upload_archive(self, data_collected, duration):
'''
Do an HTTPS upload of the archive
'''
file_name = os.path.basename(data_collected)
try:
from insights.contrib import magic
m = magic.open(magic.MAGIC_MIME)
m.load()
mime_type = m.file(data_collected)
except ImportError:
magic = None
logger.debug('python-magic not installed, using backup function...')
from .utilities import magic_plan_b
mime_type = magic_plan_b(data_collected)
files = {
'file': (file_name, open(data_collected, 'rb'), mime_type)}
if self.config.analyze_container:
logger.debug('Uploading container, image, mountpoint or tarfile.')
upload_url = self.upload_url
else:
logger.debug('Uploading a host.')
upload_url = self.upload_url + '/' + generate_machine_id()
logger.debug("Uploading %s to %s", data_collected, upload_url)
headers = {'x-rh-collection-time': str(duration)}
net_logger.info("POST %s", upload_url)
upload = self.session.post(upload_url, files=files, headers=headers)
logger.debug("Upload status: %s %s %s",
upload.status_code, upload.reason, upload.text)
if upload.status_code in (200, 201):
the_json = json.loads(upload.text)
else:
logger.error("Upload archive failed with status code %s", upload.status_code)
return upload
try:
self.config.account_number = the_json["upload"]["account_number"]
except:
self.config.account_number = None
logger.debug("Upload duration: %s", upload.elapsed)
return upload | [
"def",
"_legacy_upload_archive",
"(",
"self",
",",
"data_collected",
",",
"duration",
")",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"data_collected",
")",
"try",
":",
"from",
"insights",
".",
"contrib",
"import",
"magic",
"m",
"=",
"... | Do an HTTPS upload of the archive | [
"Do",
"an",
"HTTPS",
"upload",
"of",
"the",
"archive"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L722-L766 |
239,760 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.upload_archive | def upload_archive(self, data_collected, content_type, duration):
"""
Do an HTTPS Upload of the archive
"""
if self.config.legacy_upload:
return self._legacy_upload_archive(data_collected, duration)
file_name = os.path.basename(data_collected)
upload_url = self.upload_url
c_facts = {}
try:
c_facts = get_canonical_facts()
except Exception as e:
logger.debug('Error getting canonical facts: %s', e)
if self.config.display_name:
# add display_name to canonical facts
c_facts['display_name'] = self.config.display_name
c_facts = json.dumps(c_facts)
logger.debug('Canonical facts collected:\n%s', c_facts)
files = {
'file': (file_name, open(data_collected, 'rb'), content_type),
'metadata': c_facts
}
logger.debug("Uploading %s to %s", data_collected, upload_url)
net_logger.info("POST %s", upload_url)
upload = self.session.post(upload_url, files=files, headers={})
logger.debug("Upload status: %s %s %s",
upload.status_code, upload.reason, upload.text)
logger.debug('Request ID: %s', upload.headers.get('x-rh-insights-request-id', None))
if upload.status_code == 202:
# 202 from platform, no json response
logger.debug(upload.text)
# upload = registration on platform
write_registered_file()
else:
logger.error(
"Upload archive failed with status code %s",
upload.status_code)
return upload
logger.debug("Upload duration: %s", upload.elapsed)
return upload | python | def upload_archive(self, data_collected, content_type, duration):
if self.config.legacy_upload:
return self._legacy_upload_archive(data_collected, duration)
file_name = os.path.basename(data_collected)
upload_url = self.upload_url
c_facts = {}
try:
c_facts = get_canonical_facts()
except Exception as e:
logger.debug('Error getting canonical facts: %s', e)
if self.config.display_name:
# add display_name to canonical facts
c_facts['display_name'] = self.config.display_name
c_facts = json.dumps(c_facts)
logger.debug('Canonical facts collected:\n%s', c_facts)
files = {
'file': (file_name, open(data_collected, 'rb'), content_type),
'metadata': c_facts
}
logger.debug("Uploading %s to %s", data_collected, upload_url)
net_logger.info("POST %s", upload_url)
upload = self.session.post(upload_url, files=files, headers={})
logger.debug("Upload status: %s %s %s",
upload.status_code, upload.reason, upload.text)
logger.debug('Request ID: %s', upload.headers.get('x-rh-insights-request-id', None))
if upload.status_code == 202:
# 202 from platform, no json response
logger.debug(upload.text)
# upload = registration on platform
write_registered_file()
else:
logger.error(
"Upload archive failed with status code %s",
upload.status_code)
return upload
logger.debug("Upload duration: %s", upload.elapsed)
return upload | [
"def",
"upload_archive",
"(",
"self",
",",
"data_collected",
",",
"content_type",
",",
"duration",
")",
":",
"if",
"self",
".",
"config",
".",
"legacy_upload",
":",
"return",
"self",
".",
"_legacy_upload_archive",
"(",
"data_collected",
",",
"duration",
")",
"... | Do an HTTPS Upload of the archive | [
"Do",
"an",
"HTTPS",
"Upload",
"of",
"the",
"archive"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L768-L811 |
239,761 | RedHatInsights/insights-core | insights/client/connection.py | InsightsConnection.set_display_name | def set_display_name(self, display_name):
'''
Set display name of a system independently of upload.
'''
if self.config.legacy_upload:
return self._legacy_set_display_name(display_name)
system = self._fetch_system_by_machine_id()
if not system:
return system
inventory_id = system[0]['id']
req_url = self.base_url + '/inventory/v1/hosts/' + inventory_id
try:
net_logger.info("PATCH %s", req_url)
res = self.session.patch(req_url, json={'display_name': display_name})
except (requests.ConnectionError, requests.Timeout) as e:
logger.error(e)
logger.error('The Insights API could not be reached.')
return False
if (self.handle_fail_rcs(res)):
logger.error('Could not update display name.')
return False
logger.info('Display name updated to ' + display_name + '.')
return True | python | def set_display_name(self, display_name):
'''
Set display name of a system independently of upload.
'''
if self.config.legacy_upload:
return self._legacy_set_display_name(display_name)
system = self._fetch_system_by_machine_id()
if not system:
return system
inventory_id = system[0]['id']
req_url = self.base_url + '/inventory/v1/hosts/' + inventory_id
try:
net_logger.info("PATCH %s", req_url)
res = self.session.patch(req_url, json={'display_name': display_name})
except (requests.ConnectionError, requests.Timeout) as e:
logger.error(e)
logger.error('The Insights API could not be reached.')
return False
if (self.handle_fail_rcs(res)):
logger.error('Could not update display name.')
return False
logger.info('Display name updated to ' + display_name + '.')
return True | [
"def",
"set_display_name",
"(",
"self",
",",
"display_name",
")",
":",
"if",
"self",
".",
"config",
".",
"legacy_upload",
":",
"return",
"self",
".",
"_legacy_set_display_name",
"(",
"display_name",
")",
"system",
"=",
"self",
".",
"_fetch_system_by_machine_id",
... | Set display name of a system independently of upload. | [
"Set",
"display",
"name",
"of",
"a",
"system",
"independently",
"of",
"upload",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L851-L874 |
239,762 | RedHatInsights/insights-core | insights/client/config.py | InsightsConfig._update_dict | def _update_dict(self, dict_):
'''
Update without allowing undefined options or overwrite of class methods
'''
dict_ = dict((k, v) for k, v in dict_.items() if (
k not in self._init_attrs))
# zzz
if 'no_gpg' in dict_ and dict_['no_gpg']:
dict_['gpg'] = False
unknown_opts = set(dict_.keys()).difference(set(DEFAULT_OPTS.keys()))
if unknown_opts and self._print_errors:
# only print error once
sys.stdout.write(
'WARNING: Unknown options: ' +
', '.join(list(unknown_opts)) + '\n')
if 'no_schedule' in unknown_opts:
sys.stdout.write('WARNING: Config option `no_schedule` has '
'been deprecated. To disable automatic '
'scheduling for Red Hat Insights, run '
'`insights-client --disable-schedule`\n')
for u in unknown_opts:
dict_.pop(u, None)
self.__dict__.update(dict_) | python | def _update_dict(self, dict_):
'''
Update without allowing undefined options or overwrite of class methods
'''
dict_ = dict((k, v) for k, v in dict_.items() if (
k not in self._init_attrs))
# zzz
if 'no_gpg' in dict_ and dict_['no_gpg']:
dict_['gpg'] = False
unknown_opts = set(dict_.keys()).difference(set(DEFAULT_OPTS.keys()))
if unknown_opts and self._print_errors:
# only print error once
sys.stdout.write(
'WARNING: Unknown options: ' +
', '.join(list(unknown_opts)) + '\n')
if 'no_schedule' in unknown_opts:
sys.stdout.write('WARNING: Config option `no_schedule` has '
'been deprecated. To disable automatic '
'scheduling for Red Hat Insights, run '
'`insights-client --disable-schedule`\n')
for u in unknown_opts:
dict_.pop(u, None)
self.__dict__.update(dict_) | [
"def",
"_update_dict",
"(",
"self",
",",
"dict_",
")",
":",
"dict_",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"dict_",
".",
"items",
"(",
")",
"if",
"(",
"k",
"not",
"in",
"self",
".",
"_init_attrs",
")",
")",
"#... | Update without allowing undefined options or overwrite of class methods | [
"Update",
"without",
"allowing",
"undefined",
"options",
"or",
"overwrite",
"of",
"class",
"methods"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L393-L417 |
239,763 | RedHatInsights/insights-core | insights/client/config.py | InsightsConfig._load_config_file | def _load_config_file(self, fname=None):
'''
Load config from config file. If fname is not specified,
config is loaded from the file named by InsightsConfig.conf
'''
parsedconfig = ConfigParser.RawConfigParser()
try:
parsedconfig.read(fname or self.conf)
except ConfigParser.Error:
if self._print_errors:
sys.stdout.write(
'ERROR: Could not read configuration file, '
'using defaults\n')
return
try:
if parsedconfig.has_section(constants.app_name):
d = dict(parsedconfig.items(constants.app_name))
elif parsedconfig.has_section('redhat-access-insights'):
d = dict(parsedconfig.items('redhat-access-insights'))
else:
raise ConfigParser.Error
except ConfigParser.Error:
if self._print_errors:
sys.stdout.write(
'ERROR: Could not read configuration file, '
'using defaults\n')
return
for key in d:
try:
if key == 'retries' or key == 'cmd_timeout':
d[key] = parsedconfig.getint(constants.app_name, key)
if key == 'http_timeout':
d[key] = parsedconfig.getfloat(constants.app_name, key)
if key in DEFAULT_BOOLS and isinstance(
d[key], six.string_types):
d[key] = parsedconfig.getboolean(constants.app_name, key)
except ValueError as e:
if self._print_errors:
sys.stdout.write(
'ERROR: {0}.\nCould not read configuration file, '
'using defaults\n'.format(e))
return
self._update_dict(d) | python | def _load_config_file(self, fname=None):
'''
Load config from config file. If fname is not specified,
config is loaded from the file named by InsightsConfig.conf
'''
parsedconfig = ConfigParser.RawConfigParser()
try:
parsedconfig.read(fname or self.conf)
except ConfigParser.Error:
if self._print_errors:
sys.stdout.write(
'ERROR: Could not read configuration file, '
'using defaults\n')
return
try:
if parsedconfig.has_section(constants.app_name):
d = dict(parsedconfig.items(constants.app_name))
elif parsedconfig.has_section('redhat-access-insights'):
d = dict(parsedconfig.items('redhat-access-insights'))
else:
raise ConfigParser.Error
except ConfigParser.Error:
if self._print_errors:
sys.stdout.write(
'ERROR: Could not read configuration file, '
'using defaults\n')
return
for key in d:
try:
if key == 'retries' or key == 'cmd_timeout':
d[key] = parsedconfig.getint(constants.app_name, key)
if key == 'http_timeout':
d[key] = parsedconfig.getfloat(constants.app_name, key)
if key in DEFAULT_BOOLS and isinstance(
d[key], six.string_types):
d[key] = parsedconfig.getboolean(constants.app_name, key)
except ValueError as e:
if self._print_errors:
sys.stdout.write(
'ERROR: {0}.\nCould not read configuration file, '
'using defaults\n'.format(e))
return
self._update_dict(d) | [
"def",
"_load_config_file",
"(",
"self",
",",
"fname",
"=",
"None",
")",
":",
"parsedconfig",
"=",
"ConfigParser",
".",
"RawConfigParser",
"(",
")",
"try",
":",
"parsedconfig",
".",
"read",
"(",
"fname",
"or",
"self",
".",
"conf",
")",
"except",
"ConfigPar... | Load config from config file. If fname is not specified,
config is loaded from the file named by InsightsConfig.conf | [
"Load",
"config",
"from",
"config",
"file",
".",
"If",
"fname",
"is",
"not",
"specified",
"config",
"is",
"loaded",
"from",
"the",
"file",
"named",
"by",
"InsightsConfig",
".",
"conf"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L493-L535 |
239,764 | RedHatInsights/insights-core | insights/client/config.py | InsightsConfig.load_all | def load_all(self):
'''
Helper function for actual Insights client use
'''
# check for custom conf file before loading conf
self._load_command_line(conf_only=True)
self._load_config_file()
self._load_env()
self._load_command_line()
self._imply_options()
self._validate_options()
return self | python | def load_all(self):
'''
Helper function for actual Insights client use
'''
# check for custom conf file before loading conf
self._load_command_line(conf_only=True)
self._load_config_file()
self._load_env()
self._load_command_line()
self._imply_options()
self._validate_options()
return self | [
"def",
"load_all",
"(",
"self",
")",
":",
"# check for custom conf file before loading conf",
"self",
".",
"_load_command_line",
"(",
"conf_only",
"=",
"True",
")",
"self",
".",
"_load_config_file",
"(",
")",
"self",
".",
"_load_env",
"(",
")",
"self",
".",
"_lo... | Helper function for actual Insights client use | [
"Helper",
"function",
"for",
"actual",
"Insights",
"client",
"use"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L537-L548 |
239,765 | RedHatInsights/insights-core | insights/client/config.py | InsightsConfig._validate_options | def _validate_options(self):
'''
Make sure there are no conflicting or invalid options
'''
if self.obfuscate_hostname and not self.obfuscate:
raise ValueError(
'Option `obfuscate_hostname` requires `obfuscate`')
if self.analyze_image_id is not None and len(self.analyze_image_id) < 12:
raise ValueError(
'Image/Container ID must be at least twelve characters long.')
if self.enable_schedule and self.disable_schedule:
raise ValueError(
'Conflicting options: --enable-schedule and --disable-schedule')
if self.analyze_container and (self.register or self.unregister):
raise ValueError('Registration not supported with '
'image or container analysis.')
if self.to_json and self.to_stdout:
raise ValueError(
'Conflicting options: --to-stdout and --to-json')
if self.payload and not self.content_type:
raise ValueError(
'--payload requires --content-type')
if not self.legacy_upload:
if self.group:
raise ValueError(
'--group is not supported at this time.')
if self.analyze_image_id:
raise ValueError(
'--analyze-image-id is not supported at this time.')
if self.analyze_file:
raise ValueError(
'--analyze-file is not supported at this time.')
if self.analyze_mountpoint:
raise ValueError(
'--analyze-mountpoint is not supported at this time.')
if self.analyze_container:
raise ValueError(
'--analyze-container is not supported at this time.') | python | def _validate_options(self):
'''
Make sure there are no conflicting or invalid options
'''
if self.obfuscate_hostname and not self.obfuscate:
raise ValueError(
'Option `obfuscate_hostname` requires `obfuscate`')
if self.analyze_image_id is not None and len(self.analyze_image_id) < 12:
raise ValueError(
'Image/Container ID must be at least twelve characters long.')
if self.enable_schedule and self.disable_schedule:
raise ValueError(
'Conflicting options: --enable-schedule and --disable-schedule')
if self.analyze_container and (self.register or self.unregister):
raise ValueError('Registration not supported with '
'image or container analysis.')
if self.to_json and self.to_stdout:
raise ValueError(
'Conflicting options: --to-stdout and --to-json')
if self.payload and not self.content_type:
raise ValueError(
'--payload requires --content-type')
if not self.legacy_upload:
if self.group:
raise ValueError(
'--group is not supported at this time.')
if self.analyze_image_id:
raise ValueError(
'--analyze-image-id is not supported at this time.')
if self.analyze_file:
raise ValueError(
'--analyze-file is not supported at this time.')
if self.analyze_mountpoint:
raise ValueError(
'--analyze-mountpoint is not supported at this time.')
if self.analyze_container:
raise ValueError(
'--analyze-container is not supported at this time.') | [
"def",
"_validate_options",
"(",
"self",
")",
":",
"if",
"self",
".",
"obfuscate_hostname",
"and",
"not",
"self",
".",
"obfuscate",
":",
"raise",
"ValueError",
"(",
"'Option `obfuscate_hostname` requires `obfuscate`'",
")",
"if",
"self",
".",
"analyze_image_id",
"is... | Make sure there are no conflicting or invalid options | [
"Make",
"sure",
"there",
"are",
"no",
"conflicting",
"or",
"invalid",
"options"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L550-L587 |
239,766 | RedHatInsights/insights-core | insights/client/config.py | InsightsConfig._imply_options | def _imply_options(self):
'''
Some options enable others automatically
'''
self.no_upload = self.no_upload or self.to_stdout or self.offline
self.auto_update = self.auto_update and not self.offline
if (self.analyze_container or
self.analyze_file or
self.analyze_mountpoint or
self.analyze_image_id):
self.analyze_container = True
self.to_json = self.to_json or self.analyze_container
self.register = (self.register or self.reregister) and not self.offline
self.keep_archive = self.keep_archive or self.no_upload
if self.payload:
self.legacy_upload = False | python | def _imply_options(self):
'''
Some options enable others automatically
'''
self.no_upload = self.no_upload or self.to_stdout or self.offline
self.auto_update = self.auto_update and not self.offline
if (self.analyze_container or
self.analyze_file or
self.analyze_mountpoint or
self.analyze_image_id):
self.analyze_container = True
self.to_json = self.to_json or self.analyze_container
self.register = (self.register or self.reregister) and not self.offline
self.keep_archive = self.keep_archive or self.no_upload
if self.payload:
self.legacy_upload = False | [
"def",
"_imply_options",
"(",
"self",
")",
":",
"self",
".",
"no_upload",
"=",
"self",
".",
"no_upload",
"or",
"self",
".",
"to_stdout",
"or",
"self",
".",
"offline",
"self",
".",
"auto_update",
"=",
"self",
".",
"auto_update",
"and",
"not",
"self",
".",... | Some options enable others automatically | [
"Some",
"options",
"enable",
"others",
"automatically"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L589-L604 |
239,767 | RedHatInsights/insights-core | insights/parsers/httpd_conf.py | dict_deep_merge | def dict_deep_merge(tgt, src):
"""
Utility function to merge the source dictionary `src` to the target
dictionary recursively
Note:
The type of the values in the dictionary can only be `dict` or `list`
Parameters:
tgt (dict): The target dictionary
src (dict): The source dictionary
"""
for k, v in src.items():
if k in tgt:
if isinstance(tgt[k], dict) and isinstance(v, dict):
dict_deep_merge(tgt[k], v)
else:
tgt[k].extend(deepcopy(v))
else:
tgt[k] = deepcopy(v) | python | def dict_deep_merge(tgt, src):
for k, v in src.items():
if k in tgt:
if isinstance(tgt[k], dict) and isinstance(v, dict):
dict_deep_merge(tgt[k], v)
else:
tgt[k].extend(deepcopy(v))
else:
tgt[k] = deepcopy(v) | [
"def",
"dict_deep_merge",
"(",
"tgt",
",",
"src",
")",
":",
"for",
"k",
",",
"v",
"in",
"src",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"tgt",
":",
"if",
"isinstance",
"(",
"tgt",
"[",
"k",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
... | Utility function to merge the source dictionary `src` to the target
dictionary recursively
Note:
The type of the values in the dictionary can only be `dict` or `list`
Parameters:
tgt (dict): The target dictionary
src (dict): The source dictionary | [
"Utility",
"function",
"to",
"merge",
"the",
"source",
"dictionary",
"src",
"to",
"the",
"target",
"dictionary",
"recursively"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/httpd_conf.py#L195-L214 |
239,768 | RedHatInsights/insights-core | insights/client/mount.py | Mount._activate_thin_device | def _activate_thin_device(name, dm_id, size, pool):
"""
Provisions an LVM device-mapper thin device reflecting,
DM device id 'dm_id' in the docker pool.
"""
table = '0 %d thin /dev/mapper/%s %s' % (int(size) // 512, pool, dm_id)
cmd = ['dmsetup', 'create', name, '--table', table]
r = util.subp(cmd)
if r.return_code != 0:
raise MountError('Failed to create thin device: %s' %
r.stderr.decode(sys.getdefaultencoding())) | python | def _activate_thin_device(name, dm_id, size, pool):
table = '0 %d thin /dev/mapper/%s %s' % (int(size) // 512, pool, dm_id)
cmd = ['dmsetup', 'create', name, '--table', table]
r = util.subp(cmd)
if r.return_code != 0:
raise MountError('Failed to create thin device: %s' %
r.stderr.decode(sys.getdefaultencoding())) | [
"def",
"_activate_thin_device",
"(",
"name",
",",
"dm_id",
",",
"size",
",",
"pool",
")",
":",
"table",
"=",
"'0 %d thin /dev/mapper/%s %s'",
"%",
"(",
"int",
"(",
"size",
")",
"//",
"512",
",",
"pool",
",",
"dm_id",
")",
"cmd",
"=",
"[",
"'dmsetup'",
... | Provisions an LVM device-mapper thin device reflecting,
DM device id 'dm_id' in the docker pool. | [
"Provisions",
"an",
"LVM",
"device",
"-",
"mapper",
"thin",
"device",
"reflecting",
"DM",
"device",
"id",
"dm_id",
"in",
"the",
"docker",
"pool",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L74-L84 |
239,769 | RedHatInsights/insights-core | insights/client/mount.py | Mount.remove_thin_device | def remove_thin_device(name, force=False):
"""
Destroys a thin device via subprocess call.
"""
cmd = ['dmsetup', 'remove', '--retry', name]
r = util.subp(cmd)
if not force:
if r.return_code != 0:
raise MountError('Could not remove thin device:\n%s' %
r.stderr.decode(sys.getdefaultencoding()).split("\n")[0]) | python | def remove_thin_device(name, force=False):
cmd = ['dmsetup', 'remove', '--retry', name]
r = util.subp(cmd)
if not force:
if r.return_code != 0:
raise MountError('Could not remove thin device:\n%s' %
r.stderr.decode(sys.getdefaultencoding()).split("\n")[0]) | [
"def",
"remove_thin_device",
"(",
"name",
",",
"force",
"=",
"False",
")",
":",
"cmd",
"=",
"[",
"'dmsetup'",
",",
"'remove'",
",",
"'--retry'",
",",
"name",
"]",
"r",
"=",
"util",
".",
"subp",
"(",
"cmd",
")",
"if",
"not",
"force",
":",
"if",
"r",... | Destroys a thin device via subprocess call. | [
"Destroys",
"a",
"thin",
"device",
"via",
"subprocess",
"call",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L87-L96 |
239,770 | RedHatInsights/insights-core | insights/client/mount.py | Mount._is_device_active | def _is_device_active(device):
"""
Checks dmsetup to see if a device is already active
"""
cmd = ['dmsetup', 'info', device]
dmsetup_info = util.subp(cmd)
for dm_line in dmsetup_info.stdout.split("\n"):
line = dm_line.split(':')
if ('State' in line[0].strip()) and ('ACTIVE' in line[1].strip()):
return True
return False | python | def _is_device_active(device):
cmd = ['dmsetup', 'info', device]
dmsetup_info = util.subp(cmd)
for dm_line in dmsetup_info.stdout.split("\n"):
line = dm_line.split(':')
if ('State' in line[0].strip()) and ('ACTIVE' in line[1].strip()):
return True
return False | [
"def",
"_is_device_active",
"(",
"device",
")",
":",
"cmd",
"=",
"[",
"'dmsetup'",
",",
"'info'",
",",
"device",
"]",
"dmsetup_info",
"=",
"util",
".",
"subp",
"(",
"cmd",
")",
"for",
"dm_line",
"in",
"dmsetup_info",
".",
"stdout",
".",
"split",
"(",
"... | Checks dmsetup to see if a device is already active | [
"Checks",
"dmsetup",
"to",
"see",
"if",
"a",
"device",
"is",
"already",
"active"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L99-L109 |
239,771 | RedHatInsights/insights-core | insights/client/mount.py | Mount.mount_path | def mount_path(source, target, bind=False):
"""
Subprocess call to mount dev at path.
"""
cmd = ['mount']
if bind:
cmd.append('--bind')
cmd.append(source)
cmd.append(target)
r = util.subp(cmd)
if r.return_code != 0:
raise MountError('Could not mount docker container:\n' +
' '.join(cmd) + '\n%s' %
r.stderr.decode(sys.getdefaultencoding())) | python | def mount_path(source, target, bind=False):
cmd = ['mount']
if bind:
cmd.append('--bind')
cmd.append(source)
cmd.append(target)
r = util.subp(cmd)
if r.return_code != 0:
raise MountError('Could not mount docker container:\n' +
' '.join(cmd) + '\n%s' %
r.stderr.decode(sys.getdefaultencoding())) | [
"def",
"mount_path",
"(",
"source",
",",
"target",
",",
"bind",
"=",
"False",
")",
":",
"cmd",
"=",
"[",
"'mount'",
"]",
"if",
"bind",
":",
"cmd",
".",
"append",
"(",
"'--bind'",
")",
"cmd",
".",
"append",
"(",
"source",
")",
"cmd",
".",
"append",
... | Subprocess call to mount dev at path. | [
"Subprocess",
"call",
"to",
"mount",
"dev",
"at",
"path",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L121-L134 |
239,772 | RedHatInsights/insights-core | insights/client/mount.py | Mount.get_dev_at_mountpoint | def get_dev_at_mountpoint(mntpoint):
"""
Retrieves the device mounted at mntpoint, or raises
MountError if none.
"""
results = util.subp(['findmnt', '-o', 'SOURCE', mntpoint])
if results.return_code != 0:
raise MountError('No device mounted at %s' % mntpoint)
stdout = results.stdout.decode(sys.getdefaultencoding())
return stdout.replace('SOURCE\n', '').strip().split('\n')[-1] | python | def get_dev_at_mountpoint(mntpoint):
results = util.subp(['findmnt', '-o', 'SOURCE', mntpoint])
if results.return_code != 0:
raise MountError('No device mounted at %s' % mntpoint)
stdout = results.stdout.decode(sys.getdefaultencoding())
return stdout.replace('SOURCE\n', '').strip().split('\n')[-1] | [
"def",
"get_dev_at_mountpoint",
"(",
"mntpoint",
")",
":",
"results",
"=",
"util",
".",
"subp",
"(",
"[",
"'findmnt'",
",",
"'-o'",
",",
"'SOURCE'",
",",
"mntpoint",
"]",
")",
"if",
"results",
".",
"return_code",
"!=",
"0",
":",
"raise",
"MountError",
"(... | Retrieves the device mounted at mntpoint, or raises
MountError if none. | [
"Retrieves",
"the",
"device",
"mounted",
"at",
"mntpoint",
"or",
"raises",
"MountError",
"if",
"none",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L137-L147 |
239,773 | RedHatInsights/insights-core | insights/client/mount.py | Mount.unmount_path | def unmount_path(path, force=False):
"""
Unmounts the directory specified by path.
"""
r = util.subp(['umount', path])
if not force:
if r.return_code != 0:
raise ValueError(r.stderr) | python | def unmount_path(path, force=False):
r = util.subp(['umount', path])
if not force:
if r.return_code != 0:
raise ValueError(r.stderr) | [
"def",
"unmount_path",
"(",
"path",
",",
"force",
"=",
"False",
")",
":",
"r",
"=",
"util",
".",
"subp",
"(",
"[",
"'umount'",
",",
"path",
"]",
")",
"if",
"not",
"force",
":",
"if",
"r",
".",
"return_code",
"!=",
"0",
":",
"raise",
"ValueError",
... | Unmounts the directory specified by path. | [
"Unmounts",
"the",
"directory",
"specified",
"by",
"path",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L150-L157 |
239,774 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._create_temp_container | def _create_temp_container(self, iid):
"""
Create a temporary container from a given iid.
Temporary containers are marked with a sentinel environment
variable so that they can be cleaned on unmount.
"""
try:
return self.client.create_container(
image=iid, command='/bin/true',
environment=['_ATOMIC_TEMP_CONTAINER'],
detach=True, network_disabled=True)['Id']
except docker.errors.APIError as ex:
raise MountError('Error creating temporary container:\n%s' % str(ex)) | python | def _create_temp_container(self, iid):
try:
return self.client.create_container(
image=iid, command='/bin/true',
environment=['_ATOMIC_TEMP_CONTAINER'],
detach=True, network_disabled=True)['Id']
except docker.errors.APIError as ex:
raise MountError('Error creating temporary container:\n%s' % str(ex)) | [
"def",
"_create_temp_container",
"(",
"self",
",",
"iid",
")",
":",
"try",
":",
"return",
"self",
".",
"client",
".",
"create_container",
"(",
"image",
"=",
"iid",
",",
"command",
"=",
"'/bin/true'",
",",
"environment",
"=",
"[",
"'_ATOMIC_TEMP_CONTAINER'",
... | Create a temporary container from a given iid.
Temporary containers are marked with a sentinel environment
variable so that they can be cleaned on unmount. | [
"Create",
"a",
"temporary",
"container",
"from",
"a",
"given",
"iid",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L176-L189 |
239,775 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._clone | def _clone(self, cid):
"""
Create a temporary image snapshot from a given cid.
Temporary image snapshots are marked with a sentinel label
so that they can be cleaned on unmount.
"""
try:
iid = self.client.commit(
container=cid,
conf={
'Labels': {
'io.projectatomic.Temporary': 'true'
}
}
)['Id']
except docker.errors.APIError as ex:
raise MountError(str(ex))
self.tmp_image = iid
return self._create_temp_container(iid) | python | def _clone(self, cid):
try:
iid = self.client.commit(
container=cid,
conf={
'Labels': {
'io.projectatomic.Temporary': 'true'
}
}
)['Id']
except docker.errors.APIError as ex:
raise MountError(str(ex))
self.tmp_image = iid
return self._create_temp_container(iid) | [
"def",
"_clone",
"(",
"self",
",",
"cid",
")",
":",
"try",
":",
"iid",
"=",
"self",
".",
"client",
".",
"commit",
"(",
"container",
"=",
"cid",
",",
"conf",
"=",
"{",
"'Labels'",
":",
"{",
"'io.projectatomic.Temporary'",
":",
"'true'",
"}",
"}",
")",... | Create a temporary image snapshot from a given cid.
Temporary image snapshots are marked with a sentinel label
so that they can be cleaned on unmount. | [
"Create",
"a",
"temporary",
"image",
"snapshot",
"from",
"a",
"given",
"cid",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L191-L210 |
239,776 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._identifier_as_cid | def _identifier_as_cid(self, identifier):
"""
Returns a container uuid for identifier.
If identifier is an image UUID or image tag, create a temporary
container and return its uuid.
"""
def __cname_matches(container, identifier):
return any([n for n in (container['Names'] or [])
if matches(n, '/' + identifier)])
# Determine if identifier is a container
containers = [c['Id'] for c in self.client.containers(all=True)
if (__cname_matches(c, identifier) or
matches(c['Id'], identifier + '*'))]
if len(containers) > 1:
raise SelectionMatchError(identifier, containers)
elif len(containers) == 1:
c = containers[0]
return self._clone(c)
# Determine if identifier is an image UUID
images = [i for i in set(self.client.images(all=True, quiet=True))
if i.startswith(identifier)]
if len(images) > 1:
raise SelectionMatchError(identifier, images)
elif len(images) == 1:
return self._create_temp_container(images[0])
# Match image tag.
images = util.image_by_name(identifier)
if len(images) > 1:
tags = [t for i in images for t in i['RepoTags']]
raise SelectionMatchError(identifier, tags)
elif len(images) == 1:
return self._create_temp_container(images[0]['Id'].replace("sha256:", ""))
raise MountError('{} did not match any image or container.'
''.format(identifier)) | python | def _identifier_as_cid(self, identifier):
def __cname_matches(container, identifier):
return any([n for n in (container['Names'] or [])
if matches(n, '/' + identifier)])
# Determine if identifier is a container
containers = [c['Id'] for c in self.client.containers(all=True)
if (__cname_matches(c, identifier) or
matches(c['Id'], identifier + '*'))]
if len(containers) > 1:
raise SelectionMatchError(identifier, containers)
elif len(containers) == 1:
c = containers[0]
return self._clone(c)
# Determine if identifier is an image UUID
images = [i for i in set(self.client.images(all=True, quiet=True))
if i.startswith(identifier)]
if len(images) > 1:
raise SelectionMatchError(identifier, images)
elif len(images) == 1:
return self._create_temp_container(images[0])
# Match image tag.
images = util.image_by_name(identifier)
if len(images) > 1:
tags = [t for i in images for t in i['RepoTags']]
raise SelectionMatchError(identifier, tags)
elif len(images) == 1:
return self._create_temp_container(images[0]['Id'].replace("sha256:", ""))
raise MountError('{} did not match any image or container.'
''.format(identifier)) | [
"def",
"_identifier_as_cid",
"(",
"self",
",",
"identifier",
")",
":",
"def",
"__cname_matches",
"(",
"container",
",",
"identifier",
")",
":",
"return",
"any",
"(",
"[",
"n",
"for",
"n",
"in",
"(",
"container",
"[",
"'Names'",
"]",
"or",
"[",
"]",
")"... | Returns a container uuid for identifier.
If identifier is an image UUID or image tag, create a temporary
container and return its uuid. | [
"Returns",
"a",
"container",
"uuid",
"for",
"identifier",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L216-L256 |
239,777 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount.mount | def mount(self, identifier):
"""
Mounts a container or image referred to by identifier to
the host filesystem.
"""
driver = self.client.info()['Driver']
driver_mount_fn = getattr(self, "_mount_" + driver,
self._unsupported_backend)
cid = driver_mount_fn(identifier)
# Return mount path so it can be later unmounted by path
return self.mountpoint, cid | python | def mount(self, identifier):
driver = self.client.info()['Driver']
driver_mount_fn = getattr(self, "_mount_" + driver,
self._unsupported_backend)
cid = driver_mount_fn(identifier)
# Return mount path so it can be later unmounted by path
return self.mountpoint, cid | [
"def",
"mount",
"(",
"self",
",",
"identifier",
")",
":",
"driver",
"=",
"self",
".",
"client",
".",
"info",
"(",
")",
"[",
"'Driver'",
"]",
"driver_mount_fn",
"=",
"getattr",
"(",
"self",
",",
"\"_mount_\"",
"+",
"driver",
",",
"self",
".",
"_unsuppor... | Mounts a container or image referred to by identifier to
the host filesystem. | [
"Mounts",
"a",
"container",
"or",
"image",
"referred",
"to",
"by",
"identifier",
"to",
"the",
"host",
"filesystem",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L274-L286 |
239,778 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._mount_devicemapper | def _mount_devicemapper(self, identifier):
"""
Devicemapper mount backend.
"""
info = self.client.info()
# cid is the contaienr_id of the temp container
cid = self._identifier_as_cid(identifier)
cinfo = self.client.inspect_container(cid)
dm_dev_name, dm_dev_id, dm_dev_size = '', '', ''
dm_pool = info['DriverStatus'][0][1]
try:
dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
except:
# TODO: deprecated when GraphDriver patch makes it upstream
dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
dm_dev_name = dm_pool.replace('pool', cid)
# grab list of devces
dmsetupLs = dmsetupWrap.getDmsetupLs()
if dmsetupLs == -1:
raise MountError('Error: dmsetup returned non zero error ')
# ENSURE device exists!
if dm_dev_name not in dmsetupLs:
# IF device doesn't exist yet we create it!
Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
dm_pool)
# check that device is shown in /dev/mapper, if not we can use the
# major minor numbers in /dev/block
mapperDir = os.path.join('/dev/mapper', dm_dev_name)
if os.path.exists(mapperDir):
dm_dev_path = mapperDir
else:
# get new dmsetupLs after device has been created!
dmsetupLs = dmsetupWrap.getDmsetupLs()
# test if device exists in dmsetupls, if so, get its majorminor found in /dev/block
majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs)
blockDir = os.path.join('/dev/block', majorMinor)
# FIXME, coudl be due to Virtual box, but occasionally the block device
# will not be created by the time we check it exists below, so we
# can wait a half a second to let it be created up
import time
time.sleep(0.1)
if os.path.exists(blockDir):
dm_dev_path = blockDir
else:
raise MountError('Error: Block device found in dmsetup ls '
'but not in /dev/mapper/ or /dev/block')
options = ['ro', 'nosuid', 'nodev']
# XFS should get nouuid
fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding())
if fstype.upper() == 'XFS' and 'nouuid' not in options:
if 'nouuid' not in options:
options.append('nouuid')
try:
Mount.mount_path(dm_dev_path, self.mountpoint)
except MountError as de:
self._cleanup_container(cinfo)
Mount.remove_thin_device(dm_dev_name)
raise de
# return the temp container ID so we can unmount later
def _mount_devicemapper(self, identifier):
    """
    Devicemapper mount backend.

    Creates (if necessary) and mounts the thin device backing the
    container identified by ``identifier`` at ``self.mountpoint``, and
    returns the id of the temporary container so the caller can unmount
    and clean it up later.

    Raises:
        MountError: if dmsetup fails, the device node cannot be located,
            or the mount itself fails.
    """
    info = self.client.info()
    # cid is the container_id of the temp container
    cid = self._identifier_as_cid(identifier)
    cinfo = self.client.inspect_container(cid)
    dm_dev_name, dm_dev_id, dm_dev_size = '', '', ''
    dm_pool = info['DriverStatus'][0][1]
    try:
        dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
        dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
        dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
    except (KeyError, TypeError):
        # TODO: deprecated when GraphDriver patch makes it upstream
        dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
        dm_dev_name = dm_pool.replace('pool', cid)
    # grab list of devices
    dmsetupLs = dmsetupWrap.getDmsetupLs()
    if dmsetupLs == -1:
        raise MountError('Error: dmsetup returned non zero error ')
    # Ensure the device exists; if it does not exist yet, create it.
    if dm_dev_name not in dmsetupLs:
        Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
                                    dm_pool)
    # Check that the device is shown in /dev/mapper; if not, we can use
    # the major:minor numbers in /dev/block.
    mapperDir = os.path.join('/dev/mapper', dm_dev_name)
    if os.path.exists(mapperDir):
        dm_dev_path = mapperDir
    else:
        # Get a fresh listing after the device has been created.
        dmsetupLs = dmsetupWrap.getDmsetupLs()
        # The device should now appear in dmsetup ls; resolve its
        # major:minor pair, which names the node under /dev/block.
        majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs)
        blockDir = os.path.join('/dev/block', majorMinor)
        # FIXME: could be due to VirtualBox, but occasionally the block
        # device is not created by the time we check below, so wait
        # briefly to let it appear.
        import time
        time.sleep(0.1)
        if not os.path.exists(blockDir):
            raise MountError('Error: Block device found in dmsetup ls '
                             'but not in /dev/mapper/ or /dev/block')
        dm_dev_path = blockDir
    options = ['ro', 'nosuid', 'nodev']
    # XFS should get nouuid
    fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding())
    if fstype.upper() == 'XFS' and 'nouuid' not in options:
        options.append('nouuid')
    # NOTE(review): `options` is built but never passed to
    # Mount.mount_path here -- confirm whether mount_path applies its
    # own defaults or whether this is a latent bug.
    try:
        Mount.mount_path(dm_dev_path, self.mountpoint)
    except MountError:
        # Undo the temp container and thin device before re-raising.
        self._cleanup_container(cinfo)
        Mount.remove_thin_device(dm_dev_name)
        raise
    # return the temp container ID so we can unmount later
    return cid
"def",
"_mount_devicemapper",
"(",
"self",
",",
"identifier",
")",
":",
"info",
"=",
"self",
".",
"client",
".",
"info",
"(",
")",
"# cid is the contaienr_id of the temp container",
"cid",
"=",
"self",
".",
"_identifier_as_cid",
"(",
"identifier",
")",
"cinfo",
... | Devicemapper mount backend. | [
"Devicemapper",
"mount",
"backend",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L293-L366 |
239,779 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._mount_overlay | def _mount_overlay(self, identifier):
"""
OverlayFS mount backend.
"""
cid = self._identifier_as_cid(identifier)
cinfo = self.client.inspect_container(cid)
ld, ud, wd = '', '', ''
try:
ld = cinfo['GraphDriver']['Data']['lowerDir']
ud = cinfo['GraphDriver']['Data']['upperDir']
wd = cinfo['GraphDriver']['Data']['workDir']
except:
ld, ud, wd = DockerMount._no_gd_api_overlay(cid)
options = ['ro', 'lowerdir=' + ld, 'upperdir=' + ud, 'workdir=' + wd]
optstring = ','.join(options)
cmd = ['mount', '-t', 'overlay', '-o', optstring, 'overlay',
self.mountpoint]
status = util.subp(cmd)
if status.return_code != 0:
self._cleanup_container(cinfo)
raise MountError('Failed to mount OverlayFS device.\n%s' %
status.stderr.decode(sys.getdefaultencoding()))
def _mount_overlay(self, identifier):
    """
    OverlayFS mount backend.

    Mounts the overlay filesystem backing the container identified by
    ``identifier`` at ``self.mountpoint`` and returns the id of the
    temporary container.

    Raises:
        MountError: if the mount command exits non-zero.
    """
    cid = self._identifier_as_cid(identifier)
    cinfo = self.client.inspect_container(cid)
    ld, ud, wd = '', '', ''
    try:
        ld = cinfo['GraphDriver']['Data']['lowerDir']
        ud = cinfo['GraphDriver']['Data']['upperDir']
        wd = cinfo['GraphDriver']['Data']['workDir']
    except (KeyError, TypeError):
        # Fall back to the pre-GraphDriver API layout.
        ld, ud, wd = DockerMount._no_gd_api_overlay(cid)
    options = ['ro', 'lowerdir=' + ld, 'upperdir=' + ud, 'workdir=' + wd]
    optstring = ','.join(options)
    cmd = ['mount', '-t', 'overlay', '-o', optstring, 'overlay',
           self.mountpoint]
    status = util.subp(cmd)
    if status.return_code != 0:
        # Undo the temp container before surfacing the failure.
        self._cleanup_container(cinfo)
        raise MountError('Failed to mount OverlayFS device.\n%s' %
                         status.stderr.decode(sys.getdefaultencoding()))
    return cid
"def",
"_mount_overlay",
"(",
"self",
",",
"identifier",
")",
":",
"cid",
"=",
"self",
".",
"_identifier_as_cid",
"(",
"identifier",
")",
"cinfo",
"=",
"self",
".",
"client",
".",
"inspect_container",
"(",
"cid",
")",
"ld",
",",
"ud",
",",
"wd",
"=",
"... | OverlayFS mount backend. | [
"OverlayFS",
"mount",
"backend",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L368-L394 |
239,780 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._cleanup_container | def _cleanup_container(self, cinfo):
"""
Remove a container and clean up its image if necessary.
"""
# I'm not a fan of doing this again here.
env = cinfo['Config']['Env']
if (env and '_ATOMIC_TEMP_CONTAINER' not in env) or not env:
return
iid = cinfo['Image']
self.client.remove_container(cinfo['Id'])
try:
labels = self.client.inspect_image(iid)['Config']['Labels']
except TypeError:
labels = {}
if labels and 'io.projectatomic.Temporary' in labels:
if labels['io.projectatomic.Temporary'] == 'true':
def _cleanup_container(self, cinfo):
    """
    Remove a container and clean up its image if necessary.
    """
    # Only temp containers we created (marked via env var) are removed.
    env = cinfo['Config']['Env']
    if not env or '_ATOMIC_TEMP_CONTAINER' not in env:
        return

    iid = cinfo['Image']
    self.client.remove_container(cinfo['Id'])

    try:
        labels = self.client.inspect_image(iid)['Config']['Labels']
    except TypeError:
        labels = {}

    # Drop the backing image too when it was marked as temporary.
    if labels and labels.get('io.projectatomic.Temporary') == 'true':
        self.client.remove_image(iid)
"def",
"_cleanup_container",
"(",
"self",
",",
"cinfo",
")",
":",
"# I'm not a fan of doing this again here.",
"env",
"=",
"cinfo",
"[",
"'Config'",
"]",
"[",
"'Env'",
"]",
"if",
"(",
"env",
"and",
"'_ATOMIC_TEMP_CONTAINER'",
"not",
"in",
"env",
")",
"or",
"no... | Remove a container and clean up its image if necessary. | [
"Remove",
"a",
"container",
"and",
"clean",
"up",
"its",
"image",
"if",
"necessary",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L396-L413 |
239,781 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._unmount_devicemapper | def _unmount_devicemapper(self, cid):
"""
Devicemapper unmount backend.
"""
mountpoint = self.mountpoint
Mount.unmount_path(mountpoint)
cinfo = self.client.inspect_container(cid)
dev_name = cinfo['GraphDriver']['Data']['DeviceName']
Mount.remove_thin_device(dev_name)
def _unmount_devicemapper(self, cid):
    """
    Devicemapper unmount backend.
    """
    # Unmount, tear down the thin device backing the temporary
    # container, then remove the container itself.
    Mount.unmount_path(self.mountpoint)
    container_info = self.client.inspect_container(cid)
    device_name = container_info['GraphDriver']['Data']['DeviceName']
    Mount.remove_thin_device(device_name)
    self._cleanup_container(container_info)
"def",
"_unmount_devicemapper",
"(",
"self",
",",
"cid",
")",
":",
"mountpoint",
"=",
"self",
".",
"mountpoint",
"Mount",
".",
"unmount_path",
"(",
"mountpoint",
")",
"cinfo",
"=",
"self",
".",
"client",
".",
"inspect_container",
"(",
"cid",
")",
"dev_name",... | Devicemapper unmount backend. | [
"Devicemapper",
"unmount",
"backend",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L430-L441 |
239,782 | RedHatInsights/insights-core | insights/client/mount.py | DockerMount._unmount_overlay | def _unmount_overlay(self, cid):
"""
OverlayFS unmount backend.
"""
mountpoint = self.mountpoint
Mount.unmount_path(mountpoint)
def _unmount_overlay(self, cid):
    """
    OverlayFS unmount backend.
    """
    # Unmount the overlay, then drop the temporary container.
    Mount.unmount_path(self.mountpoint)
    container_info = self.client.inspect_container(cid)
    self._cleanup_container(container_info)
"def",
"_unmount_overlay",
"(",
"self",
",",
"cid",
")",
":",
"mountpoint",
"=",
"self",
".",
"mountpoint",
"Mount",
".",
"unmount_path",
"(",
"mountpoint",
")",
"self",
".",
"_cleanup_container",
"(",
"self",
".",
"client",
".",
"inspect_container",
"(",
"c... | OverlayFS unmount backend. | [
"OverlayFS",
"unmount",
"backend",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L443-L449 |
239,783 | RedHatInsights/insights-core | insights/specs/jdr_archive.py | JDRSpecs.jboss_standalone_conf_file | def jboss_standalone_conf_file(broker):
"""Get which jboss standalone conf file is using from server log"""
log_files = broker[JDRSpecs.jboss_standalone_server_log]
if log_files:
log_content = log_files[-1].content
results = []
for line in log_content:
if "sun.java.command =" in line and ".jdr" not in line and "-Djboss.server.base.dir" in line:
results.append(line)
if results:
# default is standalone.xml
config_xml = 'standalone.xml'
java_command = results[-1]
if '--server-config' in java_command:
config_xml = java_command.split('--server-config=')[1].split()[0]
elif '-c ' in java_command:
config_xml = java_command.split('-c ')[1].split()[0]
return [config_xml]
def jboss_standalone_conf_file(broker):
    """Get which jboss standalone conf file is using from server log"""
    log_files = broker[JDRSpecs.jboss_standalone_server_log]
    if not log_files:
        return []
    # Only java command lines from the server itself (not the .jdr
    # tool) that carry the jboss base dir are relevant.
    matches = [
        line for line in log_files[-1].content
        if "sun.java.command =" in line and
        ".jdr" not in line and
        "-Djboss.server.base.dir" in line
    ]
    if not matches:
        return []
    java_command = matches[-1]
    # default is standalone.xml unless the command line overrides it
    config_xml = 'standalone.xml'
    if '--server-config' in java_command:
        config_xml = java_command.split('--server-config=')[1].split()[0]
    elif '-c ' in java_command:
        config_xml = java_command.split('-c ')[1].split()[0]
    return [config_xml]
"def",
"jboss_standalone_conf_file",
"(",
"broker",
")",
":",
"log_files",
"=",
"broker",
"[",
"JDRSpecs",
".",
"jboss_standalone_server_log",
"]",
"if",
"log_files",
":",
"log_content",
"=",
"log_files",
"[",
"-",
"1",
"]",
".",
"content",
"results",
"=",
"["... | Get which jboss standalone conf file is using from server log | [
"Get",
"which",
"jboss",
"standalone",
"conf",
"file",
"is",
"using",
"from",
"server",
"log"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/specs/jdr_archive.py#L23-L41 |
239,784 | RedHatInsights/insights-core | insights/util/__init__.py | parse_bool | def parse_bool(s, default=False):
"""
Return the boolean value of an English string or default if it can't be
determined.
"""
if s is None:
return default
def parse_bool(s, default=False):
    """
    Return the boolean value of an English string or default if it can't be
    determined.
    """
    return default if s is None else TRUTH.get(s.lower(), default)
"def",
"parse_bool",
"(",
"s",
",",
"default",
"=",
"False",
")",
":",
"if",
"s",
"is",
"None",
":",
"return",
"default",
"return",
"TRUTH",
".",
"get",
"(",
"s",
".",
"lower",
"(",
")",
",",
"default",
")"
] | Return the boolean value of an English string or default if it can't be
determined. | [
"Return",
"the",
"boolean",
"value",
"of",
"an",
"English",
"string",
"or",
"default",
"if",
"it",
"can",
"t",
"be",
"determined",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L22-L29 |
239,785 | RedHatInsights/insights-core | insights/util/__init__.py | defaults | def defaults(default=None):
"""
Catches any exception thrown by the wrapped function and returns `default`
instead.
Parameters
----------
default : object
The default value to return if the wrapped function throws an exception
"""
def _f(func):
@functools.wraps(func)
def __f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception:
return default
return __f
def defaults(default=None):
    """
    Catches any exception thrown by the wrapped function and returns `default`
    instead.

    The wrapper forwards all positional and keyword arguments unchanged,
    so it works for plain functions, methods, and zero-argument callables
    alike.  (The previous wrapper signature ``__f(self, *args, **kwargs)``
    required at least one positional argument, breaking zero-argument
    functions.)

    Parameters
    ----------
    default : object
        The default value to return if the wrapped function throws an exception
    """
    def _f(func):
        @functools.wraps(func)
        def __f(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                return default
        return __f
    return _f
"def",
"defaults",
"(",
"default",
"=",
"None",
")",
":",
"def",
"_f",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"__f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return"... | Catches any exception thrown by the wrapped function and returns `default`
instead.
Parameters
----------
default : object
The default value to return if the wrapped function throws an exception | [
"Catches",
"any",
"exception",
"thrown",
"by",
"the",
"wrapped",
"function",
"and",
"returns",
"default",
"instead",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L66-L85 |
239,786 | RedHatInsights/insights-core | insights/util/__init__.py | keys_in | def keys_in(items, *args):
"""
Use this utility function to ensure multiple keys are in one or more
dicts. Returns `True` if all keys are present in at least one of the
given dicts, otherwise returns `False`.
:Parameters:
- `items`: Iterable of required keys
- Variable number of subsequent arguments, each one being a dict to check.
"""
found = dict((key, False) for key in items)
for d in args:
for item in items:
if not found[item] and item in d:
found[item] = True
def keys_in(items, *args):
    """
    Use this utility function to ensure multiple keys are in one or more
    dicts. Returns `True` if all keys are present in at least one of the
    given dicts, otherwise returns `False`.

    :Parameters:
        - `items`: Iterable of required keys
        - Variable number of subsequent arguments, each one being a dict to check.
    """
    # Every required key must appear in at least one of the dicts.  The
    # generator form short-circuits instead of building a flag dict.
    return all(any(item in d for d in args) for item in items)
"def",
"keys_in",
"(",
"items",
",",
"*",
"args",
")",
":",
"found",
"=",
"dict",
"(",
"(",
"key",
",",
"False",
")",
"for",
"key",
"in",
"items",
")",
"for",
"d",
"in",
"args",
":",
"for",
"item",
"in",
"items",
":",
"if",
"not",
"found",
"[",... | Use this utility function to ensure multiple keys are in one or more
dicts. Returns `True` if all keys are present in at least one of the
given dicts, otherwise returns `False`.
:Parameters:
- `items`: Iterable of required keys
- Variable number of subsequent arguments, each one being a dict to check. | [
"Use",
"this",
"utility",
"function",
"to",
"ensure",
"multiple",
"keys",
"are",
"in",
"one",
"or",
"more",
"dicts",
".",
"Returns",
"True",
"if",
"all",
"keys",
"are",
"present",
"in",
"at",
"least",
"one",
"of",
"the",
"given",
"dicts",
"otherwise",
"r... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L88-L104 |
239,787 | RedHatInsights/insights-core | insights/util/__init__.py | deprecated | def deprecated(func, solution):
"""
Mark a parser or combiner as deprecated, and give a message of how to fix
this. This will emit a warning in the logs when the function is used.
When combined with modifications to conftest, this causes deprecations to
become fatal errors when testing, so they get fixed.
Arguments:
func (function): the function or method being deprecated.
solution (str): a string describing the replacement class, method or
function that replaces the thing being deprecated. For example,
"use the `fnord()` function" or "use the `search()` method with
the parameter `name='(value)'`".
"""
def get_name_line(src):
for line in src:
if "@" not in line:
return line.strip()
path = inspect.getsourcefile(func)
src, line_no = inspect.getsourcelines(func)
name = get_name_line(src) or "Unknown"
the_msg = "<{c}> at {p}:{l} is deprecated: {s}".format(
c=name, p=path, l=line_no, s=solution
)
def deprecated(func, solution):
    """
    Mark a parser or combiner as deprecated, and give a message of how to fix
    this. This will emit a warning in the logs when the function is used.
    When combined with modifications to conftest, this causes deprecations to
    become fatal errors when testing, so they get fixed.

    Arguments:
        func (function): the function or method being deprecated.
        solution (str): a string describing the replacement class, method or
            function that replaces the thing being deprecated. For example,
            "use the `fnord()` function" or "use the `search()` method with
            the parameter `name='(value)'`".
    """
    path = inspect.getsourcefile(func)
    src, line_no = inspect.getsourcelines(func)
    # The first source line that is not a decorator names the definition.
    name = next((line.strip() for line in src if "@" not in line), None) or "Unknown"
    the_msg = "<{c}> at {p}:{l} is deprecated: {s}".format(
        c=name, p=path, l=line_no, s=solution
    )
    warnings.warn(the_msg, DeprecationWarning)
"def",
"deprecated",
"(",
"func",
",",
"solution",
")",
":",
"def",
"get_name_line",
"(",
"src",
")",
":",
"for",
"line",
"in",
"src",
":",
"if",
"\"@\"",
"not",
"in",
"line",
":",
"return",
"line",
".",
"strip",
"(",
")",
"path",
"=",
"inspect",
"... | Mark a parser or combiner as deprecated, and give a message of how to fix
this. This will emit a warning in the logs when the function is used.
When combined with modifications to conftest, this causes deprecations to
become fatal errors when testing, so they get fixed.
Arguments:
func (function): the function or method being deprecated.
solution (str): a string describing the replacement class, method or
function that replaces the thing being deprecated. For example,
"use the `fnord()` function" or "use the `search()` method with
the parameter `name='(value)'`". | [
"Mark",
"a",
"parser",
"or",
"combiner",
"as",
"deprecated",
"and",
"give",
"a",
"message",
"of",
"how",
"to",
"fix",
"this",
".",
"This",
"will",
"emit",
"a",
"warning",
"in",
"the",
"logs",
"when",
"the",
"function",
"is",
"used",
".",
"When",
"combi... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L119-L146 |
239,788 | RedHatInsights/insights-core | insights/util/__init__.py | parse_keypair_lines | def parse_keypair_lines(content, delim='|', kv_sep='='):
"""
Parses a set of entities, where each entity is a set of key-value pairs
contained all on one line. Each entity is parsed into a dictionary and
added to the list returned from this function.
"""
r = []
if content:
for row in [line for line in content if line]:
item_dict = {}
for item in row.split(delim):
key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep)]
item_dict[key] = value
r.append(item_dict)
def parse_keypair_lines(content, delim='|', kv_sep='='):
    """
    Parses a set of entities, where each entity is a set of key-value pairs
    contained all on one line. Each entity is parsed into a dictionary and
    added to the list returned from this function.
    """
    parsed = []
    if not content:
        return parsed
    # Skip empty lines; each remaining line yields one dict.
    for row in filter(None, content):
        entity = {}
        for pair in row.split(delim):
            key, value = [part.strip("'\"").strip() for part in pair.strip().split(kv_sep)]
            entity[key] = value
        parsed.append(entity)
    return parsed
"def",
"parse_keypair_lines",
"(",
"content",
",",
"delim",
"=",
"'|'",
",",
"kv_sep",
"=",
"'='",
")",
":",
"r",
"=",
"[",
"]",
"if",
"content",
":",
"for",
"row",
"in",
"[",
"line",
"for",
"line",
"in",
"content",
"if",
"line",
"]",
":",
"item_di... | Parses a set of entities, where each entity is a set of key-value pairs
contained all on one line. Each entity is parsed into a dictionary and
added to the list returned from this function. | [
"Parses",
"a",
"set",
"of",
"entities",
"where",
"each",
"entity",
"is",
"a",
"set",
"of",
"key",
"-",
"value",
"pairs",
"contained",
"all",
"on",
"one",
"line",
".",
"Each",
"entity",
"is",
"parsed",
"into",
"a",
"dictionary",
"and",
"added",
"to",
"t... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L182-L196 |
239,789 | RedHatInsights/insights-core | insights/util/__init__.py | rsplit | def rsplit(_str, seps):
"""
Splits _str by the first sep in seps that is found from the right side.
Returns a tuple without the separator.
"""
for idx, ch in enumerate(reversed(_str)):
if ch in seps:
def rsplit(_str, seps):
    """
    Splits _str by the first sep in seps that is found from the right side.
    Returns a tuple without the separator, or None when no separator occurs.
    """
    for idx, ch in enumerate(reversed(_str)):
        if ch in seps:
            # Position of the separator counted from the left.  Using an
            # explicit position avoids the -0 slicing bug: the previous
            # `_str[-idx:]` returned the whole string when the separator
            # was the last character (idx == 0, and -0 == 0).
            pos = len(_str) - 1 - idx
            return _str[:pos], _str[pos + 1:]
"def",
"rsplit",
"(",
"_str",
",",
"seps",
")",
":",
"for",
"idx",
",",
"ch",
"in",
"enumerate",
"(",
"reversed",
"(",
"_str",
")",
")",
":",
"if",
"ch",
"in",
"seps",
":",
"return",
"_str",
"[",
"0",
":",
"-",
"idx",
"-",
"1",
"]",
",",
"_st... | Splits _str by the first sep in seps that is found from the right side.
Returns a tuple without the separator. | [
"Splits",
"_str",
"by",
"the",
"first",
"sep",
"in",
"seps",
"that",
"is",
"found",
"from",
"the",
"right",
"side",
".",
"Returns",
"a",
"tuple",
"without",
"the",
"separator",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L199-L206 |
239,790 | RedHatInsights/insights-core | insights/formats/text.py | HumanReadableFormat.progress_bar | def progress_bar(self, c, broker):
"""
Print the formated progress information for the processed return types
"""
v = broker.get(c)
if v and isinstance(v, dict) and len(v) > 0 and 'type' in v:
if v["type"] in self.responses:
print(self.responses[v["type"]].color + self.responses[v["type"]].intl + Style.RESET_ALL, end="", file=self.stream)
else:
print(".", end="", file=self.stream)
elif c in broker.exceptions:
self.counts['exception'] += len(broker.exceptions[c])
print(Fore.RED + "E" + Style.RESET_ALL, end="", file=self.stream)
def progress_bar(self, c, broker):
    """
    Print the formated progress information for the processed return types
    """
    result = broker.get(c)
    if result and isinstance(result, dict) and 'type' in result:
        response = self.responses.get(result["type"])
        if response is not None:
            print(response.color + response.intl + Style.RESET_ALL,
                  end="", file=self.stream)
        else:
            # Unknown response type: plain progress dot.
            print(".", end="", file=self.stream)
    elif c in broker.exceptions:
        # Tally every exception raised for this component.
        self.counts['exception'] += len(broker.exceptions[c])
        print(Fore.RED + "E" + Style.RESET_ALL, end="", file=self.stream)
    return self
"def",
"progress_bar",
"(",
"self",
",",
"c",
",",
"broker",
")",
":",
"v",
"=",
"broker",
".",
"get",
"(",
"c",
")",
"if",
"v",
"and",
"isinstance",
"(",
"v",
",",
"dict",
")",
"and",
"len",
"(",
"v",
")",
">",
"0",
"and",
"'type'",
"in",
"v... | Print the formated progress information for the processed return types | [
"Print",
"the",
"formated",
"progress",
"information",
"for",
"the",
"processed",
"return",
"types"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/text.py#L94-L108 |
239,791 | RedHatInsights/insights-core | insights/formats/text.py | HumanReadableFormat.show_dropped | def show_dropped(self):
""" Show dropped files """
ctx = _find_context(self.broker)
if ctx and ctx.all_files:
ds = self.broker.get_by_type(datasource)
vals = []
for v in ds.values():
if isinstance(v, list):
vals.extend(d.path for d in v)
else:
vals.append(v.path)
dropped = set(ctx.all_files) - set(vals)
pprint("Dropped Files:", stream=self.stream)
def show_dropped(self):
    """ Show dropped files """
    ctx = _find_context(self.broker)
    if not (ctx and ctx.all_files):
        return
    # Collect the path of every file a datasource actually consumed.
    consumed = []
    for value in self.broker.get_by_type(datasource).values():
        if isinstance(value, list):
            consumed.extend(item.path for item in value)
        else:
            consumed.append(value.path)
    # Anything present in the archive but never consumed was dropped.
    dropped = set(ctx.all_files) - set(consumed)
    pprint("Dropped Files:", stream=self.stream)
    pprint(dropped, indent=4, stream=self.stream)
"def",
"show_dropped",
"(",
"self",
")",
":",
"ctx",
"=",
"_find_context",
"(",
"self",
".",
"broker",
")",
"if",
"ctx",
"and",
"ctx",
".",
"all_files",
":",
"ds",
"=",
"self",
".",
"broker",
".",
"get_by_type",
"(",
"datasource",
")",
"vals",
"=",
"... | Show dropped files | [
"Show",
"dropped",
"files"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/text.py#L118-L131 |
239,792 | RedHatInsights/insights-core | insights/client/client.py | register | def register(config, pconn):
"""
Do registration using basic auth
"""
username = config.username
password = config.password
authmethod = config.authmethod
auto_config = config.auto_config
if not username and not password and not auto_config and authmethod == 'BASIC':
logger.debug('Username and password must be defined in configuration file with BASIC authentication method.')
return False
def register(config, pconn):
    """
    Do registration using basic auth
    """
    basic_auth = config.authmethod == 'BASIC'
    have_credentials = config.username or config.password
    # BASIC auth without credentials (and without auto config) cannot work.
    if basic_auth and not have_credentials and not config.auto_config:
        logger.debug('Username and password must be defined in configuration file with BASIC authentication method.')
        return False
    return pconn.register()
"def",
"register",
"(",
"config",
",",
"pconn",
")",
":",
"username",
"=",
"config",
".",
"username",
"password",
"=",
"config",
".",
"password",
"authmethod",
"=",
"config",
".",
"authmethod",
"auto_config",
"=",
"config",
".",
"auto_config",
"if",
"not",
... | Do registration using basic auth | [
"Do",
"registration",
"using",
"basic",
"auth"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/client.py#L93-L104 |
239,793 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.validate_gpg_sig | def validate_gpg_sig(self, path, sig=None):
"""
Validate the collection rules
"""
logger.debug("Verifying GPG signature of Insights configuration")
if sig is None:
sig = path + ".asc"
command = ("/usr/bin/gpg --no-default-keyring "
"--keyring " + constants.pub_gpg_path +
" --verify " + sig + " " + path)
if not six.PY3:
command = command.encode('utf-8', 'ignore')
args = shlex.split(command)
logger.debug("Executing: %s", args)
proc = Popen(
args, shell=False, stdout=PIPE, stderr=STDOUT, close_fds=True)
stdout, stderr = proc.communicate()
logger.debug("STDOUT: %s", stdout)
logger.debug("STDERR: %s", stderr)
logger.debug("Status: %s", proc.returncode)
if proc.returncode:
logger.error("ERROR: Unable to validate GPG signature: %s", path)
return False
else:
logger.debug("GPG signature verified")
def validate_gpg_sig(self, path, sig=None):
    """
    Validate the collection rules
    """
    logger.debug("Verifying GPG signature of Insights configuration")
    sig = sig if sig is not None else path + ".asc"
    command = ("/usr/bin/gpg --no-default-keyring "
               "--keyring " + constants.pub_gpg_path +
               " --verify " + sig + " " + path)
    # shlex.split needs bytes on Python 2.
    if not six.PY3:
        command = command.encode('utf-8', 'ignore')
    args = shlex.split(command)
    logger.debug("Executing: %s", args)
    proc = Popen(
        args, shell=False, stdout=PIPE, stderr=STDOUT, close_fds=True)
    stdout, stderr = proc.communicate()
    logger.debug("STDOUT: %s", stdout)
    logger.debug("STDERR: %s", stderr)
    logger.debug("Status: %s", proc.returncode)
    if not proc.returncode:
        logger.debug("GPG signature verified")
        return True
    logger.error("ERROR: Unable to validate GPG signature: %s", path)
    return False
"def",
"validate_gpg_sig",
"(",
"self",
",",
"path",
",",
"sig",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\"Verifying GPG signature of Insights configuration\"",
")",
"if",
"sig",
"is",
"None",
":",
"sig",
"=",
"path",
"+",
"\".asc\"",
"command",
... | Validate the collection rules | [
"Validate",
"the",
"collection",
"rules"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L46-L71 |
239,794 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.try_disk | def try_disk(self, path, gpg=True):
"""
Try to load json off disk
"""
if not os.path.isfile(path):
return
if not gpg or self.validate_gpg_sig(path):
stream = open(path, 'r')
json_stream = stream.read()
if len(json_stream):
try:
json_config = json.loads(json_stream)
return json_config
except ValueError:
logger.error("ERROR: Invalid JSON in %s", path)
return False
else:
logger.warn("WARNING: %s was an empty file", path)
def try_disk(self, path, gpg=True):
    """
    Try to load json off disk

    Returns the parsed object on success, ``False`` when the file holds
    invalid JSON, and ``None`` when the file is missing, empty, or the
    GPG signature cannot be validated.
    """
    if not os.path.isfile(path):
        return
    if gpg and not self.validate_gpg_sig(path):
        return
    # Use a context manager so the file handle is always closed (the
    # previous version leaked the open stream).
    with open(path, 'r') as stream:
        json_stream = stream.read()
    if not json_stream:
        logger.warn("WARNING: %s was an empty file", path)
        return
    try:
        return json.loads(json_stream)
    except ValueError:
        logger.error("ERROR: Invalid JSON in %s", path)
        return False
"def",
"try_disk",
"(",
"self",
",",
"path",
",",
"gpg",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"return",
"if",
"not",
"gpg",
"or",
"self",
".",
"validate_gpg_sig",
"(",
"path",
")",
":",
"str... | Try to load json off disk | [
"Try",
"to",
"load",
"json",
"off",
"disk"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L73-L92 |
239,795 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.get_collection_rules | def get_collection_rules(self, raw=False):
"""
Download the collection rules
"""
logger.debug("Attemping to download collection rules from %s",
self.collection_rules_url)
net_logger.info("GET %s", self.collection_rules_url)
try:
req = self.conn.session.get(
self.collection_rules_url, headers=({'accept': 'text/plain'}))
if req.status_code == 200:
logger.debug("Successfully downloaded collection rules")
json_response = NamedTemporaryFile()
json_response.write(req.text.encode('utf-8'))
json_response.file.flush()
else:
logger.error("ERROR: Could not download dynamic configuration")
logger.error("Debug Info: \nConf status: %s", req.status_code)
logger.error("Debug Info: \nConf message: %s", req.text)
return None
except requests.ConnectionError as e:
logger.error(
"ERROR: Could not download dynamic configuration: %s", e)
return None
if self.gpg:
self.get_collection_rules_gpg(json_response)
self.write_collection_data(self.collection_rules_file, req.text)
if raw:
return req.text
else:
def get_collection_rules(self, raw=False):
    """
    Download the collection rules
    """
    logger.debug("Attemping to download collection rules from %s",
                 self.collection_rules_url)
    net_logger.info("GET %s", self.collection_rules_url)
    try:
        req = self.conn.session.get(
            self.collection_rules_url, headers=({'accept': 'text/plain'}))
        if req.status_code != 200:
            logger.error("ERROR: Could not download dynamic configuration")
            logger.error("Debug Info: \nConf status: %s", req.status_code)
            logger.error("Debug Info: \nConf message: %s", req.text)
            return None
        logger.debug("Successfully downloaded collection rules")
        # Stage the payload in a temp file so its signature can be checked.
        json_response = NamedTemporaryFile()
        json_response.write(req.text.encode('utf-8'))
        json_response.file.flush()
    except requests.ConnectionError as e:
        logger.error(
            "ERROR: Could not download dynamic configuration: %s", e)
        return None
    if self.gpg:
        self.get_collection_rules_gpg(json_response)
    # Cache the rules locally for later runs.
    self.write_collection_data(self.collection_rules_file, req.text)
    return req.text if raw else json.loads(req.text)
"def",
"get_collection_rules",
"(",
"self",
",",
"raw",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Attemping to download collection rules from %s\"",
",",
"self",
".",
"collection_rules_url",
")",
"net_logger",
".",
"info",
"(",
"\"GET %s\"",
",",
"se... | Download the collection rules | [
"Download",
"the",
"collection",
"rules"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L94-L130 |
239,796 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.get_collection_rules_gpg | def get_collection_rules_gpg(self, collection_rules):
"""
Download the collection rules gpg signature
"""
sig_text = self.fetch_gpg()
sig_response = NamedTemporaryFile(suffix=".asc")
sig_response.write(sig_text.encode('utf-8'))
sig_response.file.flush()
self.validate_gpg_sig(collection_rules.name, sig_response.name)
def get_collection_rules_gpg(self, collection_rules):
    """
    Download the collection rules gpg signature
    """
    sig_text = self.fetch_gpg()
    # Stage the detached signature in a temp file for verification.
    sig_file = NamedTemporaryFile(suffix=".asc")
    sig_file.write(sig_text.encode('utf-8'))
    sig_file.file.flush()
    self.validate_gpg_sig(collection_rules.name, sig_file.name)
    # Cache the signature next to the cached rules.
    self.write_collection_data(self.collection_rules_file + ".asc", sig_text)
"def",
"get_collection_rules_gpg",
"(",
"self",
",",
"collection_rules",
")",
":",
"sig_text",
"=",
"self",
".",
"fetch_gpg",
"(",
")",
"sig_response",
"=",
"NamedTemporaryFile",
"(",
"suffix",
"=",
"\".asc\"",
")",
"sig_response",
".",
"write",
"(",
"sig_text",... | Download the collection rules gpg signature | [
"Download",
"the",
"collection",
"rules",
"gpg",
"signature"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L149-L158 |
239,797 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.write_collection_data | def write_collection_data(self, path, data):
"""
Write collections rules to disk
"""
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
fd = os.open(path, flags, 0o600)
with os.fdopen(fd, 'w') as dyn_conf_file:
def write_collection_data(self, path, data):
    """
    Write collections rules to disk
    """
    # Create/truncate with owner-only permissions (0600).
    open_flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
    descriptor = os.open(path, open_flags, 0o600)
    with os.fdopen(descriptor, 'w') as conf_file:
        conf_file.write(data)
"def",
"write_collection_data",
"(",
"self",
",",
"path",
",",
"data",
")",
":",
"flags",
"=",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_CREAT",
"|",
"os",
".",
"O_TRUNC",
"fd",
"=",
"os",
".",
"open",
"(",
"path",
",",
"flags",
",",
"0o600",
")",
... | Write collections rules to disk | [
"Write",
"collections",
"rules",
"to",
"disk"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L160-L167 |
239,798 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.get_conf_file | def get_conf_file(self):
"""
Get config from local config file, first try cache, then fallback.
"""
for conf_file in [self.collection_rules_file, self.fallback_file]:
logger.debug("trying to read conf from: " + conf_file)
conf = self.try_disk(conf_file, self.gpg)
if not conf:
continue
version = conf.get('version', None)
if version is None:
raise ValueError("ERROR: Could not find version in json")
conf['file'] = conf_file
logger.debug("Success reading config")
logger.debug(json.dumps(conf))
return conf
raise ValueError("ERROR: Unable to download conf or read it from disk!") | python | def get_conf_file(self):
for conf_file in [self.collection_rules_file, self.fallback_file]:
logger.debug("trying to read conf from: " + conf_file)
conf = self.try_disk(conf_file, self.gpg)
if not conf:
continue
version = conf.get('version', None)
if version is None:
raise ValueError("ERROR: Could not find version in json")
conf['file'] = conf_file
logger.debug("Success reading config")
logger.debug(json.dumps(conf))
return conf
raise ValueError("ERROR: Unable to download conf or read it from disk!") | [
"def",
"get_conf_file",
"(",
"self",
")",
":",
"for",
"conf_file",
"in",
"[",
"self",
".",
"collection_rules_file",
",",
"self",
".",
"fallback_file",
"]",
":",
"logger",
".",
"debug",
"(",
"\"trying to read conf from: \"",
"+",
"conf_file",
")",
"conf",
"=",
... | Get config from local config file, first try cache, then fallback. | [
"Get",
"config",
"from",
"local",
"config",
"file",
"first",
"try",
"cache",
"then",
"fallback",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L169-L189 |
239,799 | RedHatInsights/insights-core | insights/client/collection_rules.py | InsightsUploadConf.get_conf_update | def get_conf_update(self):
"""
Get updated config from URL, fallback to local file if download fails.
"""
dyn_conf = self.get_collection_rules()
if not dyn_conf:
return self.get_conf_file()
version = dyn_conf.get('version', None)
if version is None:
raise ValueError("ERROR: Could not find version in json")
dyn_conf['file'] = self.collection_rules_file
logger.debug("Success reading config")
config_hash = hashlib.sha1(json.dumps(dyn_conf).encode('utf-8')).hexdigest()
logger.debug('sha1 of config: %s', config_hash)
return dyn_conf | python | def get_conf_update(self):
dyn_conf = self.get_collection_rules()
if not dyn_conf:
return self.get_conf_file()
version = dyn_conf.get('version', None)
if version is None:
raise ValueError("ERROR: Could not find version in json")
dyn_conf['file'] = self.collection_rules_file
logger.debug("Success reading config")
config_hash = hashlib.sha1(json.dumps(dyn_conf).encode('utf-8')).hexdigest()
logger.debug('sha1 of config: %s', config_hash)
return dyn_conf | [
"def",
"get_conf_update",
"(",
"self",
")",
":",
"dyn_conf",
"=",
"self",
".",
"get_collection_rules",
"(",
")",
"if",
"not",
"dyn_conf",
":",
"return",
"self",
".",
"get_conf_file",
"(",
")",
"version",
"=",
"dyn_conf",
".",
"get",
"(",
"'version'",
",",
... | Get updated config from URL, fallback to local file if download fails. | [
"Get",
"updated",
"config",
"from",
"URL",
"fallback",
"to",
"local",
"file",
"if",
"download",
"fails",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L191-L208 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.