repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
RedHatInsights/insights-core
insights/client/config.py
InsightsConfig._update_dict
def _update_dict(self, dict_):
    '''
    Update without allowing undefined options or overwrite of class methods
    '''
    # Drop anything that would shadow an attribute the class itself defines.
    filtered = {k: v for k, v in dict_.items() if k not in self._init_attrs}
    # no_gpg is a negative alias for gpg.
    if filtered.get('no_gpg'):
        filtered['gpg'] = False
    # Anything not present in the canonical option table is unknown.
    unknown_opts = set(filtered) - set(DEFAULT_OPTS)
    if unknown_opts and self._print_errors:
        # only print error once
        sys.stdout.write(
            'WARNING: Unknown options: ' +
            ', '.join(list(unknown_opts)) + '\n')
        if 'no_schedule' in unknown_opts:
            sys.stdout.write('WARNING: Config option `no_schedule` has '
                             'been deprecated. To disable automatic '
                             'scheduling for Red Hat Insights, run '
                             '`insights-client --disable-schedule`\n')
    for opt in unknown_opts:
        filtered.pop(opt, None)
    self.__dict__.update(filtered)
python
def _update_dict(self, dict_): ''' Update without allowing undefined options or overwrite of class methods ''' dict_ = dict((k, v) for k, v in dict_.items() if ( k not in self._init_attrs)) # zzz if 'no_gpg' in dict_ and dict_['no_gpg']: dict_['gpg'] = False unknown_opts = set(dict_.keys()).difference(set(DEFAULT_OPTS.keys())) if unknown_opts and self._print_errors: # only print error once sys.stdout.write( 'WARNING: Unknown options: ' + ', '.join(list(unknown_opts)) + '\n') if 'no_schedule' in unknown_opts: sys.stdout.write('WARNING: Config option `no_schedule` has ' 'been deprecated. To disable automatic ' 'scheduling for Red Hat Insights, run ' '`insights-client --disable-schedule`\n') for u in unknown_opts: dict_.pop(u, None) self.__dict__.update(dict_)
[ "def", "_update_dict", "(", "self", ",", "dict_", ")", ":", "dict_", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "dict_", ".", "items", "(", ")", "if", "(", "k", "not", "in", "self", ".", "_init_attrs", ")", ")", "#...
Update without allowing undefined options or overwrite of class methods
[ "Update", "without", "allowing", "undefined", "options", "or", "overwrite", "of", "class", "methods" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L393-L417
train
220,800
RedHatInsights/insights-core
insights/client/config.py
InsightsConfig._load_config_file
def _load_config_file(self, fname=None):
    '''
    Load config from config file. If fname is not specified,
    config is loaded from the file named by InsightsConfig.conf

    :param fname: optional path to a config file; falls back to self.conf
    '''
    parsedconfig = ConfigParser.RawConfigParser()
    try:
        parsedconfig.read(fname or self.conf)
    except ConfigParser.Error:
        # Unreadable/unparseable file: warn and keep defaults.
        if self._print_errors:
            sys.stdout.write(
                'ERROR: Could not read configuration file, '
                'using defaults\n')
        return
    try:
        # Prefer the current section name; fall back to the legacy
        # "redhat-access-insights" section for old config files.
        if parsedconfig.has_section(constants.app_name):
            d = dict(parsedconfig.items(constants.app_name))
        elif parsedconfig.has_section('redhat-access-insights'):
            d = dict(parsedconfig.items('redhat-access-insights'))
        else:
            # No recognized section is treated like a parse failure.
            raise ConfigParser.Error
    except ConfigParser.Error:
        if self._print_errors:
            sys.stdout.write(
                'ERROR: Could not read configuration file, '
                'using defaults\n')
        return
    for key in d:
        try:
            # Coerce known non-string options; everything else stays
            # as the raw string the parser produced.
            # NOTE(review): coercion always reads constants.app_name,
            # even when values came from the legacy section -- a
            # legacy-only file would raise NoSectionError here; confirm.
            if key == 'retries' or key == 'cmd_timeout':
                d[key] = parsedconfig.getint(constants.app_name, key)
            if key == 'http_timeout':
                d[key] = parsedconfig.getfloat(constants.app_name, key)
            if key in DEFAULT_BOOLS and isinstance(
                    d[key], six.string_types):
                d[key] = parsedconfig.getboolean(constants.app_name, key)
        except ValueError as e:
            # One malformed value abandons the whole file, not just the key.
            if self._print_errors:
                sys.stdout.write(
                    'ERROR: {0}.\nCould not read configuration file, '
                    'using defaults\n'.format(e))
            return
    self._update_dict(d)
python
def _load_config_file(self, fname=None): ''' Load config from config file. If fname is not specified, config is loaded from the file named by InsightsConfig.conf ''' parsedconfig = ConfigParser.RawConfigParser() try: parsedconfig.read(fname or self.conf) except ConfigParser.Error: if self._print_errors: sys.stdout.write( 'ERROR: Could not read configuration file, ' 'using defaults\n') return try: if parsedconfig.has_section(constants.app_name): d = dict(parsedconfig.items(constants.app_name)) elif parsedconfig.has_section('redhat-access-insights'): d = dict(parsedconfig.items('redhat-access-insights')) else: raise ConfigParser.Error except ConfigParser.Error: if self._print_errors: sys.stdout.write( 'ERROR: Could not read configuration file, ' 'using defaults\n') return for key in d: try: if key == 'retries' or key == 'cmd_timeout': d[key] = parsedconfig.getint(constants.app_name, key) if key == 'http_timeout': d[key] = parsedconfig.getfloat(constants.app_name, key) if key in DEFAULT_BOOLS and isinstance( d[key], six.string_types): d[key] = parsedconfig.getboolean(constants.app_name, key) except ValueError as e: if self._print_errors: sys.stdout.write( 'ERROR: {0}.\nCould not read configuration file, ' 'using defaults\n'.format(e)) return self._update_dict(d)
[ "def", "_load_config_file", "(", "self", ",", "fname", "=", "None", ")", ":", "parsedconfig", "=", "ConfigParser", ".", "RawConfigParser", "(", ")", "try", ":", "parsedconfig", ".", "read", "(", "fname", "or", "self", ".", "conf", ")", "except", "ConfigPar...
Load config from config file. If fname is not specified, config is loaded from the file named by InsightsConfig.conf
[ "Load", "config", "from", "config", "file", ".", "If", "fname", "is", "not", "specified", "config", "is", "loaded", "from", "the", "file", "named", "by", "InsightsConfig", ".", "conf" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L493-L535
train
220,801
RedHatInsights/insights-core
insights/client/config.py
InsightsConfig.load_all
def load_all(self): ''' Helper function for actual Insights client use ''' # check for custom conf file before loading conf self._load_command_line(conf_only=True) self._load_config_file() self._load_env() self._load_command_line() self._imply_options() self._validate_options() return self
python
def load_all(self): ''' Helper function for actual Insights client use ''' # check for custom conf file before loading conf self._load_command_line(conf_only=True) self._load_config_file() self._load_env() self._load_command_line() self._imply_options() self._validate_options() return self
[ "def", "load_all", "(", "self", ")", ":", "# check for custom conf file before loading conf", "self", ".", "_load_command_line", "(", "conf_only", "=", "True", ")", "self", ".", "_load_config_file", "(", ")", "self", ".", "_load_env", "(", ")", "self", ".", "_lo...
Helper function for actual Insights client use
[ "Helper", "function", "for", "actual", "Insights", "client", "use" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L537-L548
train
220,802
RedHatInsights/insights-core
insights/client/config.py
InsightsConfig._validate_options
def _validate_options(self): ''' Make sure there are no conflicting or invalid options ''' if self.obfuscate_hostname and not self.obfuscate: raise ValueError( 'Option `obfuscate_hostname` requires `obfuscate`') if self.analyze_image_id is not None and len(self.analyze_image_id) < 12: raise ValueError( 'Image/Container ID must be at least twelve characters long.') if self.enable_schedule and self.disable_schedule: raise ValueError( 'Conflicting options: --enable-schedule and --disable-schedule') if self.analyze_container and (self.register or self.unregister): raise ValueError('Registration not supported with ' 'image or container analysis.') if self.to_json and self.to_stdout: raise ValueError( 'Conflicting options: --to-stdout and --to-json') if self.payload and not self.content_type: raise ValueError( '--payload requires --content-type') if not self.legacy_upload: if self.group: raise ValueError( '--group is not supported at this time.') if self.analyze_image_id: raise ValueError( '--analyze-image-id is not supported at this time.') if self.analyze_file: raise ValueError( '--analyze-file is not supported at this time.') if self.analyze_mountpoint: raise ValueError( '--analyze-mountpoint is not supported at this time.') if self.analyze_container: raise ValueError( '--analyze-container is not supported at this time.')
python
def _validate_options(self): ''' Make sure there are no conflicting or invalid options ''' if self.obfuscate_hostname and not self.obfuscate: raise ValueError( 'Option `obfuscate_hostname` requires `obfuscate`') if self.analyze_image_id is not None and len(self.analyze_image_id) < 12: raise ValueError( 'Image/Container ID must be at least twelve characters long.') if self.enable_schedule and self.disable_schedule: raise ValueError( 'Conflicting options: --enable-schedule and --disable-schedule') if self.analyze_container and (self.register or self.unregister): raise ValueError('Registration not supported with ' 'image or container analysis.') if self.to_json and self.to_stdout: raise ValueError( 'Conflicting options: --to-stdout and --to-json') if self.payload and not self.content_type: raise ValueError( '--payload requires --content-type') if not self.legacy_upload: if self.group: raise ValueError( '--group is not supported at this time.') if self.analyze_image_id: raise ValueError( '--analyze-image-id is not supported at this time.') if self.analyze_file: raise ValueError( '--analyze-file is not supported at this time.') if self.analyze_mountpoint: raise ValueError( '--analyze-mountpoint is not supported at this time.') if self.analyze_container: raise ValueError( '--analyze-container is not supported at this time.')
[ "def", "_validate_options", "(", "self", ")", ":", "if", "self", ".", "obfuscate_hostname", "and", "not", "self", ".", "obfuscate", ":", "raise", "ValueError", "(", "'Option `obfuscate_hostname` requires `obfuscate`'", ")", "if", "self", ".", "analyze_image_id", "is...
Make sure there are no conflicting or invalid options
[ "Make", "sure", "there", "are", "no", "conflicting", "or", "invalid", "options" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L550-L587
train
220,803
RedHatInsights/insights-core
insights/client/config.py
InsightsConfig._imply_options
def _imply_options(self): ''' Some options enable others automatically ''' self.no_upload = self.no_upload or self.to_stdout or self.offline self.auto_update = self.auto_update and not self.offline if (self.analyze_container or self.analyze_file or self.analyze_mountpoint or self.analyze_image_id): self.analyze_container = True self.to_json = self.to_json or self.analyze_container self.register = (self.register or self.reregister) and not self.offline self.keep_archive = self.keep_archive or self.no_upload if self.payload: self.legacy_upload = False
python
def _imply_options(self): ''' Some options enable others automatically ''' self.no_upload = self.no_upload or self.to_stdout or self.offline self.auto_update = self.auto_update and not self.offline if (self.analyze_container or self.analyze_file or self.analyze_mountpoint or self.analyze_image_id): self.analyze_container = True self.to_json = self.to_json or self.analyze_container self.register = (self.register or self.reregister) and not self.offline self.keep_archive = self.keep_archive or self.no_upload if self.payload: self.legacy_upload = False
[ "def", "_imply_options", "(", "self", ")", ":", "self", ".", "no_upload", "=", "self", ".", "no_upload", "or", "self", ".", "to_stdout", "or", "self", ".", "offline", "self", ".", "auto_update", "=", "self", ".", "auto_update", "and", "not", "self", ".",...
Some options enable others automatically
[ "Some", "options", "enable", "others", "automatically" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/config.py#L589-L604
train
220,804
RedHatInsights/insights-core
insights/parsers/httpd_conf.py
dict_deep_merge
def dict_deep_merge(tgt, src):
    """
    Utility function to merge the source dictionary `src` to the
    target dictionary recursively

    Note:
        The type of the values in the dictionary can only be
        `dict` or `list`

    Parameters:
        tgt (dict): The target dictionary
        src (dict): The source dictionary
    """
    for key, value in src.items():
        if key not in tgt:
            # New key: deep-copy the subtree so tgt never aliases src.
            tgt[key] = deepcopy(value)
        elif isinstance(tgt[key], dict) and isinstance(value, dict):
            # Both sides are mappings: merge one level deeper.
            dict_deep_merge(tgt[key], value)
        else:
            # List values accumulate rather than overwrite.
            tgt[key].extend(deepcopy(value))
python
def dict_deep_merge(tgt, src): """ Utility function to merge the source dictionary `src` to the target dictionary recursively Note: The type of the values in the dictionary can only be `dict` or `list` Parameters: tgt (dict): The target dictionary src (dict): The source dictionary """ for k, v in src.items(): if k in tgt: if isinstance(tgt[k], dict) and isinstance(v, dict): dict_deep_merge(tgt[k], v) else: tgt[k].extend(deepcopy(v)) else: tgt[k] = deepcopy(v)
[ "def", "dict_deep_merge", "(", "tgt", ",", "src", ")", ":", "for", "k", ",", "v", "in", "src", ".", "items", "(", ")", ":", "if", "k", "in", "tgt", ":", "if", "isinstance", "(", "tgt", "[", "k", "]", ",", "dict", ")", "and", "isinstance", "(", ...
Utility function to merge the source dictionary `src` to the target dictionary recursively Note: The type of the values in the dictionary can only be `dict` or `list` Parameters: tgt (dict): The target dictionary src (dict): The source dictionary
[ "Utility", "function", "to", "merge", "the", "source", "dictionary", "src", "to", "the", "target", "dictionary", "recursively" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/httpd_conf.py#L195-L214
train
220,805
RedHatInsights/insights-core
insights/client/mount.py
Mount._activate_thin_device
def _activate_thin_device(name, dm_id, size, pool):
    """
    Provisions an LVM device-mapper thin device reflecting,
    DM device id 'dm_id' in the docker pool.
    """
    # dmsetup tables are expressed in 512-byte sectors.
    sectors = int(size) // 512
    table = '0 %d thin /dev/mapper/%s %s' % (sectors, pool, dm_id)
    result = util.subp(['dmsetup', 'create', name, '--table', table])
    if result.return_code != 0:
        stderr = result.stderr.decode(sys.getdefaultencoding())
        raise MountError('Failed to create thin device: %s' % stderr)
python
def _activate_thin_device(name, dm_id, size, pool): """ Provisions an LVM device-mapper thin device reflecting, DM device id 'dm_id' in the docker pool. """ table = '0 %d thin /dev/mapper/%s %s' % (int(size) // 512, pool, dm_id) cmd = ['dmsetup', 'create', name, '--table', table] r = util.subp(cmd) if r.return_code != 0: raise MountError('Failed to create thin device: %s' % r.stderr.decode(sys.getdefaultencoding()))
[ "def", "_activate_thin_device", "(", "name", ",", "dm_id", ",", "size", ",", "pool", ")", ":", "table", "=", "'0 %d thin /dev/mapper/%s %s'", "%", "(", "int", "(", "size", ")", "//", "512", ",", "pool", ",", "dm_id", ")", "cmd", "=", "[", "'dmsetup'", ...
Provisions an LVM device-mapper thin device reflecting, DM device id 'dm_id' in the docker pool.
[ "Provisions", "an", "LVM", "device", "-", "mapper", "thin", "device", "reflecting", "DM", "device", "id", "dm_id", "in", "the", "docker", "pool", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L74-L84
train
220,806
RedHatInsights/insights-core
insights/client/mount.py
Mount.remove_thin_device
def remove_thin_device(name, force=False):
    """
    Destroys a thin device via subprocess call.
    """
    result = util.subp(['dmsetup', 'remove', '--retry', name])
    if force:
        # Best-effort mode: ignore any failure from dmsetup.
        return
    if result.return_code != 0:
        # Report only the first line of dmsetup's stderr.
        first_line = result.stderr.decode(
            sys.getdefaultencoding()).split("\n")[0]
        raise MountError('Could not remove thin device:\n%s' % first_line)
python
def remove_thin_device(name, force=False): """ Destroys a thin device via subprocess call. """ cmd = ['dmsetup', 'remove', '--retry', name] r = util.subp(cmd) if not force: if r.return_code != 0: raise MountError('Could not remove thin device:\n%s' % r.stderr.decode(sys.getdefaultencoding()).split("\n")[0])
[ "def", "remove_thin_device", "(", "name", ",", "force", "=", "False", ")", ":", "cmd", "=", "[", "'dmsetup'", ",", "'remove'", ",", "'--retry'", ",", "name", "]", "r", "=", "util", ".", "subp", "(", "cmd", ")", "if", "not", "force", ":", "if", "r",...
Destroys a thin device via subprocess call.
[ "Destroys", "a", "thin", "device", "via", "subprocess", "call", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L87-L96
train
220,807
RedHatInsights/insights-core
insights/client/mount.py
Mount._is_device_active
def _is_device_active(device):
    """
    Checks dmsetup to see if a device is already active
    """
    info = util.subp(['dmsetup', 'info', device])
    for raw_line in info.stdout.split("\n"):
        fields = raw_line.split(':')
        # Looking for a line of the form "State: ACTIVE".
        if 'State' in fields[0].strip() and 'ACTIVE' in fields[1].strip():
            return True
    return False
python
def _is_device_active(device): """ Checks dmsetup to see if a device is already active """ cmd = ['dmsetup', 'info', device] dmsetup_info = util.subp(cmd) for dm_line in dmsetup_info.stdout.split("\n"): line = dm_line.split(':') if ('State' in line[0].strip()) and ('ACTIVE' in line[1].strip()): return True return False
[ "def", "_is_device_active", "(", "device", ")", ":", "cmd", "=", "[", "'dmsetup'", ",", "'info'", ",", "device", "]", "dmsetup_info", "=", "util", ".", "subp", "(", "cmd", ")", "for", "dm_line", "in", "dmsetup_info", ".", "stdout", ".", "split", "(", "...
Checks dmsetup to see if a device is already active
[ "Checks", "dmsetup", "to", "see", "if", "a", "device", "is", "already", "active" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L99-L109
train
220,808
RedHatInsights/insights-core
insights/client/mount.py
Mount.mount_path
def mount_path(source, target, bind=False):
    """
    Subprocess call to mount dev at path.
    """
    cmd = ['mount']
    if bind:
        cmd.append('--bind')
    cmd.extend([source, target])
    result = util.subp(cmd)
    if result.return_code != 0:
        raise MountError('Could not mount docker container:\n' +
                         ' '.join(cmd) + '\n%s' %
                         result.stderr.decode(sys.getdefaultencoding()))
python
def mount_path(source, target, bind=False): """ Subprocess call to mount dev at path. """ cmd = ['mount'] if bind: cmd.append('--bind') cmd.append(source) cmd.append(target) r = util.subp(cmd) if r.return_code != 0: raise MountError('Could not mount docker container:\n' + ' '.join(cmd) + '\n%s' % r.stderr.decode(sys.getdefaultencoding()))
[ "def", "mount_path", "(", "source", ",", "target", ",", "bind", "=", "False", ")", ":", "cmd", "=", "[", "'mount'", "]", "if", "bind", ":", "cmd", ".", "append", "(", "'--bind'", ")", "cmd", ".", "append", "(", "source", ")", "cmd", ".", "append", ...
Subprocess call to mount dev at path.
[ "Subprocess", "call", "to", "mount", "dev", "at", "path", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L121-L134
train
220,809
RedHatInsights/insights-core
insights/client/mount.py
Mount.get_dev_at_mountpoint
def get_dev_at_mountpoint(mntpoint):
    """
    Retrieves the device mounted at mntpoint, or raises
    MountError if none.
    """
    results = util.subp(['findmnt', '-o', 'SOURCE', mntpoint])
    if results.return_code != 0:
        raise MountError('No device mounted at %s' % mntpoint)
    output = results.stdout.decode(sys.getdefaultencoding())
    # Strip the findmnt column header, then take the last line,
    # which is the device backing the mountpoint.
    lines = output.replace('SOURCE\n', '').strip().split('\n')
    return lines[-1]
python
def get_dev_at_mountpoint(mntpoint): """ Retrieves the device mounted at mntpoint, or raises MountError if none. """ results = util.subp(['findmnt', '-o', 'SOURCE', mntpoint]) if results.return_code != 0: raise MountError('No device mounted at %s' % mntpoint) stdout = results.stdout.decode(sys.getdefaultencoding()) return stdout.replace('SOURCE\n', '').strip().split('\n')[-1]
[ "def", "get_dev_at_mountpoint", "(", "mntpoint", ")", ":", "results", "=", "util", ".", "subp", "(", "[", "'findmnt'", ",", "'-o'", ",", "'SOURCE'", ",", "mntpoint", "]", ")", "if", "results", ".", "return_code", "!=", "0", ":", "raise", "MountError", "(...
Retrieves the device mounted at mntpoint, or raises MountError if none.
[ "Retrieves", "the", "device", "mounted", "at", "mntpoint", "or", "raises", "MountError", "if", "none", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L137-L147
train
220,810
RedHatInsights/insights-core
insights/client/mount.py
Mount.unmount_path
def unmount_path(path, force=False):
    """
    Unmounts the directory specified by path.
    """
    result = util.subp(['umount', path])
    if force:
        # Best-effort: swallow umount failures entirely.
        return
    if result.return_code != 0:
        # NOTE(review): sibling helpers raise MountError on failure;
        # this one raises ValueError -- confirm callers depend on that
        # before unifying the exception type.
        raise ValueError(result.stderr)
python
def unmount_path(path, force=False): """ Unmounts the directory specified by path. """ r = util.subp(['umount', path]) if not force: if r.return_code != 0: raise ValueError(r.stderr)
[ "def", "unmount_path", "(", "path", ",", "force", "=", "False", ")", ":", "r", "=", "util", ".", "subp", "(", "[", "'umount'", ",", "path", "]", ")", "if", "not", "force", ":", "if", "r", ".", "return_code", "!=", "0", ":", "raise", "ValueError", ...
Unmounts the directory specified by path.
[ "Unmounts", "the", "directory", "specified", "by", "path", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L150-L157
train
220,811
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._create_temp_container
def _create_temp_container(self, iid):
    """
    Create a temporary container from a given iid.

    Temporary containers are marked with a sentinel environment
    variable so that they can be cleaned on unmount.
    """
    try:
        container = self.client.create_container(
            image=iid,
            command='/bin/true',
            environment=['_ATOMIC_TEMP_CONTAINER'],
            detach=True,
            network_disabled=True)
        return container['Id']
    except docker.errors.APIError as ex:
        raise MountError('Error creating temporary container:\n%s' % str(ex))
python
def _create_temp_container(self, iid): """ Create a temporary container from a given iid. Temporary containers are marked with a sentinel environment variable so that they can be cleaned on unmount. """ try: return self.client.create_container( image=iid, command='/bin/true', environment=['_ATOMIC_TEMP_CONTAINER'], detach=True, network_disabled=True)['Id'] except docker.errors.APIError as ex: raise MountError('Error creating temporary container:\n%s' % str(ex))
[ "def", "_create_temp_container", "(", "self", ",", "iid", ")", ":", "try", ":", "return", "self", ".", "client", ".", "create_container", "(", "image", "=", "iid", ",", "command", "=", "'/bin/true'", ",", "environment", "=", "[", "'_ATOMIC_TEMP_CONTAINER'", ...
Create a temporary container from a given iid. Temporary containers are marked with a sentinel environment variable so that they can be cleaned on unmount.
[ "Create", "a", "temporary", "container", "from", "a", "given", "iid", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L176-L189
train
220,812
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._clone
def _clone(self, cid):
    """
    Create a temporary image snapshot from a given cid.

    Temporary image snapshots are marked with a sentinel label
    so that they can be cleaned on unmount.
    """
    # Sentinel label marking the snapshot as disposable.
    temp_conf = {'Labels': {'io.projectatomic.Temporary': 'true'}}
    try:
        iid = self.client.commit(container=cid, conf=temp_conf)['Id']
    except docker.errors.APIError as ex:
        raise MountError(str(ex))
    self.tmp_image = iid
    return self._create_temp_container(iid)
python
def _clone(self, cid): """ Create a temporary image snapshot from a given cid. Temporary image snapshots are marked with a sentinel label so that they can be cleaned on unmount. """ try: iid = self.client.commit( container=cid, conf={ 'Labels': { 'io.projectatomic.Temporary': 'true' } } )['Id'] except docker.errors.APIError as ex: raise MountError(str(ex)) self.tmp_image = iid return self._create_temp_container(iid)
[ "def", "_clone", "(", "self", ",", "cid", ")", ":", "try", ":", "iid", "=", "self", ".", "client", ".", "commit", "(", "container", "=", "cid", ",", "conf", "=", "{", "'Labels'", ":", "{", "'io.projectatomic.Temporary'", ":", "'true'", "}", "}", ")",...
Create a temporary image snapshot from a given cid. Temporary image snapshots are marked with a sentinel label so that they can be cleaned on unmount.
[ "Create", "a", "temporary", "image", "snapshot", "from", "a", "given", "cid", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L191-L210
train
220,813
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._identifier_as_cid
def _identifier_as_cid(self, identifier):
    """
    Returns a container uuid for identifier.

    If identifier is an image UUID or image tag, create a temporary
    container and return its uuid.
    """
    def __cname_matches(container, identifier):
        # Container names are reported with a leading '/'.
        return any([n for n in (container['Names'] or [])
                    if matches(n, '/' + identifier)])

    # First: does the identifier name an existing container
    # (by name, or by ID prefix)?
    containers = [c['Id'] for c in self.client.containers(all=True)
                  if (__cname_matches(c, identifier) or
                      matches(c['Id'], identifier + '*'))]
    if len(containers) > 1:
        raise SelectionMatchError(identifier, containers)
    if containers:
        return self._clone(containers[0])

    # Second: an image UUID prefix?
    images = [i for i in set(self.client.images(all=True, quiet=True))
              if i.startswith(identifier)]
    if len(images) > 1:
        raise SelectionMatchError(identifier, images)
    if images:
        return self._create_temp_container(images[0])

    # Last resort: match an image tag.
    images = util.image_by_name(identifier)
    if len(images) > 1:
        tags = [t for i in images for t in i['RepoTags']]
        raise SelectionMatchError(identifier, tags)
    if images:
        return self._create_temp_container(
            images[0]['Id'].replace("sha256:", ""))

    raise MountError('{} did not match any image or container.'
                     ''.format(identifier))
python
def _identifier_as_cid(self, identifier): """ Returns a container uuid for identifier. If identifier is an image UUID or image tag, create a temporary container and return its uuid. """ def __cname_matches(container, identifier): return any([n for n in (container['Names'] or []) if matches(n, '/' + identifier)]) # Determine if identifier is a container containers = [c['Id'] for c in self.client.containers(all=True) if (__cname_matches(c, identifier) or matches(c['Id'], identifier + '*'))] if len(containers) > 1: raise SelectionMatchError(identifier, containers) elif len(containers) == 1: c = containers[0] return self._clone(c) # Determine if identifier is an image UUID images = [i for i in set(self.client.images(all=True, quiet=True)) if i.startswith(identifier)] if len(images) > 1: raise SelectionMatchError(identifier, images) elif len(images) == 1: return self._create_temp_container(images[0]) # Match image tag. images = util.image_by_name(identifier) if len(images) > 1: tags = [t for i in images for t in i['RepoTags']] raise SelectionMatchError(identifier, tags) elif len(images) == 1: return self._create_temp_container(images[0]['Id'].replace("sha256:", "")) raise MountError('{} did not match any image or container.' ''.format(identifier))
[ "def", "_identifier_as_cid", "(", "self", ",", "identifier", ")", ":", "def", "__cname_matches", "(", "container", ",", "identifier", ")", ":", "return", "any", "(", "[", "n", "for", "n", "in", "(", "container", "[", "'Names'", "]", "or", "[", "]", ")"...
Returns a container uuid for identifier. If identifier is an image UUID or image tag, create a temporary container and return its uuid.
[ "Returns", "a", "container", "uuid", "for", "identifier", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L216-L256
train
220,814
RedHatInsights/insights-core
insights/client/mount.py
DockerMount.mount
def mount(self, identifier):
    """
    Mounts a container or image referred to by identifier to
    the host filesystem.
    """
    # Dispatch on the storage driver reported by the docker daemon;
    # drivers without a _mount_<driver> method fall through to
    # _unsupported_backend.
    driver = self.client.info()['Driver']
    backend = getattr(self, "_mount_" + driver, self._unsupported_backend)
    cid = backend(identifier)
    # Return mount path so it can be later unmounted by path
    return self.mountpoint, cid
python
def mount(self, identifier): """ Mounts a container or image referred to by identifier to the host filesystem. """ driver = self.client.info()['Driver'] driver_mount_fn = getattr(self, "_mount_" + driver, self._unsupported_backend) cid = driver_mount_fn(identifier) # Return mount path so it can be later unmounted by path return self.mountpoint, cid
[ "def", "mount", "(", "self", ",", "identifier", ")", ":", "driver", "=", "self", ".", "client", ".", "info", "(", ")", "[", "'Driver'", "]", "driver_mount_fn", "=", "getattr", "(", "self", ",", "\"_mount_\"", "+", "driver", ",", "self", ".", "_unsuppor...
Mounts a container or image referred to by identifier to the host filesystem.
[ "Mounts", "a", "container", "or", "image", "referred", "to", "by", "identifier", "to", "the", "host", "filesystem", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L274-L286
train
220,815
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._mount_devicemapper
def _mount_devicemapper(self, identifier):
    """
    Devicemapper mount backend.

    Resolves identifier to a temporary container, activates its
    thin device if needed, and mounts it at self.mountpoint.
    Returns the temp container ID so the caller can unmount later.
    """
    info = self.client.info()
    # cid is the container_id of the temp container
    cid = self._identifier_as_cid(identifier)
    cinfo = self.client.inspect_container(cid)
    dm_dev_name, dm_dev_id, dm_dev_size = '', '', ''
    # Pool name comes from the daemon's DriverStatus table.
    dm_pool = info['DriverStatus'][0][1]
    try:
        dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName']
        dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId']
        dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize']
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; a KeyError catch looks intended -- confirm.
        # TODO: deprecated when GraphDriver patch makes it upstream
        dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid)
        dm_dev_name = dm_pool.replace('pool', cid)
    # grab list of devices
    dmsetupLs = dmsetupWrap.getDmsetupLs()
    if dmsetupLs == -1:
        raise MountError('Error: dmsetup returned non zero error ')
    # ENSURE device exists!
    if dm_dev_name not in dmsetupLs:
        # IF device doesn't exist yet we create it!
        Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size,
                                    dm_pool)
    # check that device is shown in /dev/mapper, if not we can use the
    # major minor numbers in /dev/block
    mapperDir = os.path.join('/dev/mapper', dm_dev_name)
    if os.path.exists(mapperDir):
        dm_dev_path = mapperDir
    else:
        # get new dmsetupLs after device has been created!
        dmsetupLs = dmsetupWrap.getDmsetupLs()
        # test if device exists in dmsetupls, if so, get its majorminor
        # found in /dev/block
        majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs)
        blockDir = os.path.join('/dev/block', majorMinor)
        # FIXME, could be due to Virtual box, but occasionally the block device
        # will not be created by the time we check it exists below, so we
        # can wait a half a second to let it be created up
        # NOTE(review): the sleep below is 0.1s, not the half second the
        # comment above describes -- confirm which is intended.
        import time
        time.sleep(0.1)
        if os.path.exists(blockDir):
            dm_dev_path = blockDir
        else:
            raise MountError('Error: Block device found in dmsetup ls '
                             'but not in /dev/mapper/ or /dev/block')
    # NOTE(review): `options` (including nouuid for XFS) is built but
    # never passed to Mount.mount_path below -- the mount options appear
    # to be dropped; confirm.
    options = ['ro', 'nosuid', 'nodev']
    # XFS should get nouuid
    fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding())
    if fstype.upper() == 'XFS' and 'nouuid' not in options:
        if 'nouuid' not in options:
            options.append('nouuid')
    try:
        Mount.mount_path(dm_dev_path, self.mountpoint)
    except MountError as de:
        # Roll back: drop the temp container and its thin device
        # before re-raising.
        self._cleanup_container(cinfo)
        Mount.remove_thin_device(dm_dev_name)
        raise de
    # return the temp container ID so we can unmount later
    return cid
python
def _mount_devicemapper(self, identifier): """ Devicemapper mount backend. """ info = self.client.info() # cid is the contaienr_id of the temp container cid = self._identifier_as_cid(identifier) cinfo = self.client.inspect_container(cid) dm_dev_name, dm_dev_id, dm_dev_size = '', '', '' dm_pool = info['DriverStatus'][0][1] try: dm_dev_name = cinfo['GraphDriver']['Data']['DeviceName'] dm_dev_id = cinfo['GraphDriver']['Data']['DeviceId'] dm_dev_size = cinfo['GraphDriver']['Data']['DeviceSize'] except: # TODO: deprecated when GraphDriver patch makes it upstream dm_dev_id, dm_dev_size = DockerMount._no_gd_api_dm(cid) dm_dev_name = dm_pool.replace('pool', cid) # grab list of devces dmsetupLs = dmsetupWrap.getDmsetupLs() if dmsetupLs == -1: raise MountError('Error: dmsetup returned non zero error ') # ENSURE device exists! if dm_dev_name not in dmsetupLs: # IF device doesn't exist yet we create it! Mount._activate_thin_device(dm_dev_name, dm_dev_id, dm_dev_size, dm_pool) # check that device is shown in /dev/mapper, if not we can use the # major minor numbers in /dev/block mapperDir = os.path.join('/dev/mapper', dm_dev_name) if os.path.exists(mapperDir): dm_dev_path = mapperDir else: # get new dmsetupLs after device has been created! 
dmsetupLs = dmsetupWrap.getDmsetupLs() # test if device exists in dmsetupls, if so, get its majorminor found in /dev/block majorMinor = dmsetupWrap.getMajorMinor(dm_dev_name, dmsetupLs) blockDir = os.path.join('/dev/block', majorMinor) # FIXME, coudl be due to Virtual box, but occasionally the block device # will not be created by the time we check it exists below, so we # can wait a half a second to let it be created up import time time.sleep(0.1) if os.path.exists(blockDir): dm_dev_path = blockDir else: raise MountError('Error: Block device found in dmsetup ls ' 'but not in /dev/mapper/ or /dev/block') options = ['ro', 'nosuid', 'nodev'] # XFS should get nouuid fstype = Mount._get_fs(dm_dev_path).decode(sys.getdefaultencoding()) if fstype.upper() == 'XFS' and 'nouuid' not in options: if 'nouuid' not in options: options.append('nouuid') try: Mount.mount_path(dm_dev_path, self.mountpoint) except MountError as de: self._cleanup_container(cinfo) Mount.remove_thin_device(dm_dev_name) raise de # return the temp container ID so we can unmount later return cid
[ "def", "_mount_devicemapper", "(", "self", ",", "identifier", ")", ":", "info", "=", "self", ".", "client", ".", "info", "(", ")", "# cid is the contaienr_id of the temp container", "cid", "=", "self", ".", "_identifier_as_cid", "(", "identifier", ")", "cinfo", ...
Devicemapper mount backend.
[ "Devicemapper", "mount", "backend", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L293-L366
train
220,816
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._mount_overlay
def _mount_overlay(self, identifier): """ OverlayFS mount backend. """ cid = self._identifier_as_cid(identifier) cinfo = self.client.inspect_container(cid) ld, ud, wd = '', '', '' try: ld = cinfo['GraphDriver']['Data']['lowerDir'] ud = cinfo['GraphDriver']['Data']['upperDir'] wd = cinfo['GraphDriver']['Data']['workDir'] except: ld, ud, wd = DockerMount._no_gd_api_overlay(cid) options = ['ro', 'lowerdir=' + ld, 'upperdir=' + ud, 'workdir=' + wd] optstring = ','.join(options) cmd = ['mount', '-t', 'overlay', '-o', optstring, 'overlay', self.mountpoint] status = util.subp(cmd) if status.return_code != 0: self._cleanup_container(cinfo) raise MountError('Failed to mount OverlayFS device.\n%s' % status.stderr.decode(sys.getdefaultencoding())) return cid
python
def _mount_overlay(self, identifier): """ OverlayFS mount backend. """ cid = self._identifier_as_cid(identifier) cinfo = self.client.inspect_container(cid) ld, ud, wd = '', '', '' try: ld = cinfo['GraphDriver']['Data']['lowerDir'] ud = cinfo['GraphDriver']['Data']['upperDir'] wd = cinfo['GraphDriver']['Data']['workDir'] except: ld, ud, wd = DockerMount._no_gd_api_overlay(cid) options = ['ro', 'lowerdir=' + ld, 'upperdir=' + ud, 'workdir=' + wd] optstring = ','.join(options) cmd = ['mount', '-t', 'overlay', '-o', optstring, 'overlay', self.mountpoint] status = util.subp(cmd) if status.return_code != 0: self._cleanup_container(cinfo) raise MountError('Failed to mount OverlayFS device.\n%s' % status.stderr.decode(sys.getdefaultencoding())) return cid
[ "def", "_mount_overlay", "(", "self", ",", "identifier", ")", ":", "cid", "=", "self", ".", "_identifier_as_cid", "(", "identifier", ")", "cinfo", "=", "self", ".", "client", ".", "inspect_container", "(", "cid", ")", "ld", ",", "ud", ",", "wd", "=", "...
OverlayFS mount backend.
[ "OverlayFS", "mount", "backend", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L368-L394
train
220,817
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._cleanup_container
def _cleanup_container(self, cinfo): """ Remove a container and clean up its image if necessary. """ # I'm not a fan of doing this again here. env = cinfo['Config']['Env'] if (env and '_ATOMIC_TEMP_CONTAINER' not in env) or not env: return iid = cinfo['Image'] self.client.remove_container(cinfo['Id']) try: labels = self.client.inspect_image(iid)['Config']['Labels'] except TypeError: labels = {} if labels and 'io.projectatomic.Temporary' in labels: if labels['io.projectatomic.Temporary'] == 'true': self.client.remove_image(iid)
python
def _cleanup_container(self, cinfo): """ Remove a container and clean up its image if necessary. """ # I'm not a fan of doing this again here. env = cinfo['Config']['Env'] if (env and '_ATOMIC_TEMP_CONTAINER' not in env) or not env: return iid = cinfo['Image'] self.client.remove_container(cinfo['Id']) try: labels = self.client.inspect_image(iid)['Config']['Labels'] except TypeError: labels = {} if labels and 'io.projectatomic.Temporary' in labels: if labels['io.projectatomic.Temporary'] == 'true': self.client.remove_image(iid)
[ "def", "_cleanup_container", "(", "self", ",", "cinfo", ")", ":", "# I'm not a fan of doing this again here.", "env", "=", "cinfo", "[", "'Config'", "]", "[", "'Env'", "]", "if", "(", "env", "and", "'_ATOMIC_TEMP_CONTAINER'", "not", "in", "env", ")", "or", "no...
Remove a container and clean up its image if necessary.
[ "Remove", "a", "container", "and", "clean", "up", "its", "image", "if", "necessary", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L396-L413
train
220,818
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._unmount_devicemapper
def _unmount_devicemapper(self, cid): """ Devicemapper unmount backend. """ mountpoint = self.mountpoint Mount.unmount_path(mountpoint) cinfo = self.client.inspect_container(cid) dev_name = cinfo['GraphDriver']['Data']['DeviceName'] Mount.remove_thin_device(dev_name) self._cleanup_container(cinfo)
python
def _unmount_devicemapper(self, cid): """ Devicemapper unmount backend. """ mountpoint = self.mountpoint Mount.unmount_path(mountpoint) cinfo = self.client.inspect_container(cid) dev_name = cinfo['GraphDriver']['Data']['DeviceName'] Mount.remove_thin_device(dev_name) self._cleanup_container(cinfo)
[ "def", "_unmount_devicemapper", "(", "self", ",", "cid", ")", ":", "mountpoint", "=", "self", ".", "mountpoint", "Mount", ".", "unmount_path", "(", "mountpoint", ")", "cinfo", "=", "self", ".", "client", ".", "inspect_container", "(", "cid", ")", "dev_name",...
Devicemapper unmount backend.
[ "Devicemapper", "unmount", "backend", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L430-L441
train
220,819
RedHatInsights/insights-core
insights/client/mount.py
DockerMount._unmount_overlay
def _unmount_overlay(self, cid): """ OverlayFS unmount backend. """ mountpoint = self.mountpoint Mount.unmount_path(mountpoint) self._cleanup_container(self.client.inspect_container(cid))
python
def _unmount_overlay(self, cid): """ OverlayFS unmount backend. """ mountpoint = self.mountpoint Mount.unmount_path(mountpoint) self._cleanup_container(self.client.inspect_container(cid))
[ "def", "_unmount_overlay", "(", "self", ",", "cid", ")", ":", "mountpoint", "=", "self", ".", "mountpoint", "Mount", ".", "unmount_path", "(", "mountpoint", ")", "self", ".", "_cleanup_container", "(", "self", ".", "client", ".", "inspect_container", "(", "c...
OverlayFS unmount backend.
[ "OverlayFS", "unmount", "backend", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/mount.py#L443-L449
train
220,820
RedHatInsights/insights-core
insights/specs/jdr_archive.py
JDRSpecs.jboss_standalone_conf_file
def jboss_standalone_conf_file(broker): """Get which jboss standalone conf file is using from server log""" log_files = broker[JDRSpecs.jboss_standalone_server_log] if log_files: log_content = log_files[-1].content results = [] for line in log_content: if "sun.java.command =" in line and ".jdr" not in line and "-Djboss.server.base.dir" in line: results.append(line) if results: # default is standalone.xml config_xml = 'standalone.xml' java_command = results[-1] if '--server-config' in java_command: config_xml = java_command.split('--server-config=')[1].split()[0] elif '-c ' in java_command: config_xml = java_command.split('-c ')[1].split()[0] return [config_xml] return []
python
def jboss_standalone_conf_file(broker): """Get which jboss standalone conf file is using from server log""" log_files = broker[JDRSpecs.jboss_standalone_server_log] if log_files: log_content = log_files[-1].content results = [] for line in log_content: if "sun.java.command =" in line and ".jdr" not in line and "-Djboss.server.base.dir" in line: results.append(line) if results: # default is standalone.xml config_xml = 'standalone.xml' java_command = results[-1] if '--server-config' in java_command: config_xml = java_command.split('--server-config=')[1].split()[0] elif '-c ' in java_command: config_xml = java_command.split('-c ')[1].split()[0] return [config_xml] return []
[ "def", "jboss_standalone_conf_file", "(", "broker", ")", ":", "log_files", "=", "broker", "[", "JDRSpecs", ".", "jboss_standalone_server_log", "]", "if", "log_files", ":", "log_content", "=", "log_files", "[", "-", "1", "]", ".", "content", "results", "=", "["...
Get which jboss standalone conf file is using from server log
[ "Get", "which", "jboss", "standalone", "conf", "file", "is", "using", "from", "server", "log" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/specs/jdr_archive.py#L23-L41
train
220,821
RedHatInsights/insights-core
insights/util/__init__.py
parse_bool
def parse_bool(s, default=False): """ Return the boolean value of an English string or default if it can't be determined. """ if s is None: return default return TRUTH.get(s.lower(), default)
python
def parse_bool(s, default=False): """ Return the boolean value of an English string or default if it can't be determined. """ if s is None: return default return TRUTH.get(s.lower(), default)
[ "def", "parse_bool", "(", "s", ",", "default", "=", "False", ")", ":", "if", "s", "is", "None", ":", "return", "default", "return", "TRUTH", ".", "get", "(", "s", ".", "lower", "(", ")", ",", "default", ")" ]
Return the boolean value of an English string or default if it can't be determined.
[ "Return", "the", "boolean", "value", "of", "an", "English", "string", "or", "default", "if", "it", "can", "t", "be", "determined", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L22-L29
train
220,822
RedHatInsights/insights-core
insights/util/__init__.py
defaults
def defaults(default=None): """ Catches any exception thrown by the wrapped function and returns `default` instead. Parameters ---------- default : object The default value to return if the wrapped function throws an exception """ def _f(func): @functools.wraps(func) def __f(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception: return default return __f return _f
python
def defaults(default=None): """ Catches any exception thrown by the wrapped function and returns `default` instead. Parameters ---------- default : object The default value to return if the wrapped function throws an exception """ def _f(func): @functools.wraps(func) def __f(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception: return default return __f return _f
[ "def", "defaults", "(", "default", "=", "None", ")", ":", "def", "_f", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "__f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return"...
Catches any exception thrown by the wrapped function and returns `default` instead. Parameters ---------- default : object The default value to return if the wrapped function throws an exception
[ "Catches", "any", "exception", "thrown", "by", "the", "wrapped", "function", "and", "returns", "default", "instead", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L66-L85
train
220,823
RedHatInsights/insights-core
insights/util/__init__.py
keys_in
def keys_in(items, *args): """ Use this utility function to ensure multiple keys are in one or more dicts. Returns `True` if all keys are present in at least one of the given dicts, otherwise returns `False`. :Parameters: - `items`: Iterable of required keys - Variable number of subsequent arguments, each one being a dict to check. """ found = dict((key, False) for key in items) for d in args: for item in items: if not found[item] and item in d: found[item] = True return all(found.values())
python
def keys_in(items, *args): """ Use this utility function to ensure multiple keys are in one or more dicts. Returns `True` if all keys are present in at least one of the given dicts, otherwise returns `False`. :Parameters: - `items`: Iterable of required keys - Variable number of subsequent arguments, each one being a dict to check. """ found = dict((key, False) for key in items) for d in args: for item in items: if not found[item] and item in d: found[item] = True return all(found.values())
[ "def", "keys_in", "(", "items", ",", "*", "args", ")", ":", "found", "=", "dict", "(", "(", "key", ",", "False", ")", "for", "key", "in", "items", ")", "for", "d", "in", "args", ":", "for", "item", "in", "items", ":", "if", "not", "found", "[",...
Use this utility function to ensure multiple keys are in one or more dicts. Returns `True` if all keys are present in at least one of the given dicts, otherwise returns `False`. :Parameters: - `items`: Iterable of required keys - Variable number of subsequent arguments, each one being a dict to check.
[ "Use", "this", "utility", "function", "to", "ensure", "multiple", "keys", "are", "in", "one", "or", "more", "dicts", ".", "Returns", "True", "if", "all", "keys", "are", "present", "in", "at", "least", "one", "of", "the", "given", "dicts", "otherwise", "r...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L88-L104
train
220,824
RedHatInsights/insights-core
insights/util/__init__.py
deprecated
def deprecated(func, solution): """ Mark a parser or combiner as deprecated, and give a message of how to fix this. This will emit a warning in the logs when the function is used. When combined with modifications to conftest, this causes deprecations to become fatal errors when testing, so they get fixed. Arguments: func (function): the function or method being deprecated. solution (str): a string describing the replacement class, method or function that replaces the thing being deprecated. For example, "use the `fnord()` function" or "use the `search()` method with the parameter `name='(value)'`". """ def get_name_line(src): for line in src: if "@" not in line: return line.strip() path = inspect.getsourcefile(func) src, line_no = inspect.getsourcelines(func) name = get_name_line(src) or "Unknown" the_msg = "<{c}> at {p}:{l} is deprecated: {s}".format( c=name, p=path, l=line_no, s=solution ) warnings.warn(the_msg, DeprecationWarning)
python
def deprecated(func, solution): """ Mark a parser or combiner as deprecated, and give a message of how to fix this. This will emit a warning in the logs when the function is used. When combined with modifications to conftest, this causes deprecations to become fatal errors when testing, so they get fixed. Arguments: func (function): the function or method being deprecated. solution (str): a string describing the replacement class, method or function that replaces the thing being deprecated. For example, "use the `fnord()` function" or "use the `search()` method with the parameter `name='(value)'`". """ def get_name_line(src): for line in src: if "@" not in line: return line.strip() path = inspect.getsourcefile(func) src, line_no = inspect.getsourcelines(func) name = get_name_line(src) or "Unknown" the_msg = "<{c}> at {p}:{l} is deprecated: {s}".format( c=name, p=path, l=line_no, s=solution ) warnings.warn(the_msg, DeprecationWarning)
[ "def", "deprecated", "(", "func", ",", "solution", ")", ":", "def", "get_name_line", "(", "src", ")", ":", "for", "line", "in", "src", ":", "if", "\"@\"", "not", "in", "line", ":", "return", "line", ".", "strip", "(", ")", "path", "=", "inspect", "...
Mark a parser or combiner as deprecated, and give a message of how to fix this. This will emit a warning in the logs when the function is used. When combined with modifications to conftest, this causes deprecations to become fatal errors when testing, so they get fixed. Arguments: func (function): the function or method being deprecated. solution (str): a string describing the replacement class, method or function that replaces the thing being deprecated. For example, "use the `fnord()` function" or "use the `search()` method with the parameter `name='(value)'`".
[ "Mark", "a", "parser", "or", "combiner", "as", "deprecated", "and", "give", "a", "message", "of", "how", "to", "fix", "this", ".", "This", "will", "emit", "a", "warning", "in", "the", "logs", "when", "the", "function", "is", "used", ".", "When", "combi...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L119-L146
train
220,825
RedHatInsights/insights-core
insights/util/__init__.py
parse_keypair_lines
def parse_keypair_lines(content, delim='|', kv_sep='='): """ Parses a set of entities, where each entity is a set of key-value pairs contained all on one line. Each entity is parsed into a dictionary and added to the list returned from this function. """ r = [] if content: for row in [line for line in content if line]: item_dict = {} for item in row.split(delim): key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep)] item_dict[key] = value r.append(item_dict) return r
python
def parse_keypair_lines(content, delim='|', kv_sep='='): """ Parses a set of entities, where each entity is a set of key-value pairs contained all on one line. Each entity is parsed into a dictionary and added to the list returned from this function. """ r = [] if content: for row in [line for line in content if line]: item_dict = {} for item in row.split(delim): key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep)] item_dict[key] = value r.append(item_dict) return r
[ "def", "parse_keypair_lines", "(", "content", ",", "delim", "=", "'|'", ",", "kv_sep", "=", "'='", ")", ":", "r", "=", "[", "]", "if", "content", ":", "for", "row", "in", "[", "line", "for", "line", "in", "content", "if", "line", "]", ":", "item_di...
Parses a set of entities, where each entity is a set of key-value pairs contained all on one line. Each entity is parsed into a dictionary and added to the list returned from this function.
[ "Parses", "a", "set", "of", "entities", "where", "each", "entity", "is", "a", "set", "of", "key", "-", "value", "pairs", "contained", "all", "on", "one", "line", ".", "Each", "entity", "is", "parsed", "into", "a", "dictionary", "and", "added", "to", "t...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L182-L196
train
220,826
RedHatInsights/insights-core
insights/util/__init__.py
rsplit
def rsplit(_str, seps): """ Splits _str by the first sep in seps that is found from the right side. Returns a tuple without the separator. """ for idx, ch in enumerate(reversed(_str)): if ch in seps: return _str[0:-idx - 1], _str[-idx:]
python
def rsplit(_str, seps): """ Splits _str by the first sep in seps that is found from the right side. Returns a tuple without the separator. """ for idx, ch in enumerate(reversed(_str)): if ch in seps: return _str[0:-idx - 1], _str[-idx:]
[ "def", "rsplit", "(", "_str", ",", "seps", ")", ":", "for", "idx", ",", "ch", "in", "enumerate", "(", "reversed", "(", "_str", ")", ")", ":", "if", "ch", "in", "seps", ":", "return", "_str", "[", "0", ":", "-", "idx", "-", "1", "]", ",", "_st...
Splits _str by the first sep in seps that is found from the right side. Returns a tuple without the separator.
[ "Splits", "_str", "by", "the", "first", "sep", "in", "seps", "that", "is", "found", "from", "the", "right", "side", ".", "Returns", "a", "tuple", "without", "the", "separator", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/__init__.py#L199-L206
train
220,827
RedHatInsights/insights-core
insights/formats/text.py
HumanReadableFormat.progress_bar
def progress_bar(self, c, broker): """ Print the formated progress information for the processed return types """ v = broker.get(c) if v and isinstance(v, dict) and len(v) > 0 and 'type' in v: if v["type"] in self.responses: print(self.responses[v["type"]].color + self.responses[v["type"]].intl + Style.RESET_ALL, end="", file=self.stream) else: print(".", end="", file=self.stream) elif c in broker.exceptions: self.counts['exception'] += len(broker.exceptions[c]) print(Fore.RED + "E" + Style.RESET_ALL, end="", file=self.stream) return self
python
def progress_bar(self, c, broker): """ Print the formated progress information for the processed return types """ v = broker.get(c) if v and isinstance(v, dict) and len(v) > 0 and 'type' in v: if v["type"] in self.responses: print(self.responses[v["type"]].color + self.responses[v["type"]].intl + Style.RESET_ALL, end="", file=self.stream) else: print(".", end="", file=self.stream) elif c in broker.exceptions: self.counts['exception'] += len(broker.exceptions[c]) print(Fore.RED + "E" + Style.RESET_ALL, end="", file=self.stream) return self
[ "def", "progress_bar", "(", "self", ",", "c", ",", "broker", ")", ":", "v", "=", "broker", ".", "get", "(", "c", ")", "if", "v", "and", "isinstance", "(", "v", ",", "dict", ")", "and", "len", "(", "v", ")", ">", "0", "and", "'type'", "in", "v...
Print the formated progress information for the processed return types
[ "Print", "the", "formated", "progress", "information", "for", "the", "processed", "return", "types" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/text.py#L94-L108
train
220,828
RedHatInsights/insights-core
insights/formats/text.py
HumanReadableFormat.show_dropped
def show_dropped(self): """ Show dropped files """ ctx = _find_context(self.broker) if ctx and ctx.all_files: ds = self.broker.get_by_type(datasource) vals = [] for v in ds.values(): if isinstance(v, list): vals.extend(d.path for d in v) else: vals.append(v.path) dropped = set(ctx.all_files) - set(vals) pprint("Dropped Files:", stream=self.stream) pprint(dropped, indent=4, stream=self.stream)
python
def show_dropped(self): """ Show dropped files """ ctx = _find_context(self.broker) if ctx and ctx.all_files: ds = self.broker.get_by_type(datasource) vals = [] for v in ds.values(): if isinstance(v, list): vals.extend(d.path for d in v) else: vals.append(v.path) dropped = set(ctx.all_files) - set(vals) pprint("Dropped Files:", stream=self.stream) pprint(dropped, indent=4, stream=self.stream)
[ "def", "show_dropped", "(", "self", ")", ":", "ctx", "=", "_find_context", "(", "self", ".", "broker", ")", "if", "ctx", "and", "ctx", ".", "all_files", ":", "ds", "=", "self", ".", "broker", ".", "get_by_type", "(", "datasource", ")", "vals", "=", "...
Show dropped files
[ "Show", "dropped", "files" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/text.py#L118-L131
train
220,829
RedHatInsights/insights-core
insights/client/client.py
register
def register(config, pconn): """ Do registration using basic auth """ username = config.username password = config.password authmethod = config.authmethod auto_config = config.auto_config if not username and not password and not auto_config and authmethod == 'BASIC': logger.debug('Username and password must be defined in configuration file with BASIC authentication method.') return False return pconn.register()
python
def register(config, pconn): """ Do registration using basic auth """ username = config.username password = config.password authmethod = config.authmethod auto_config = config.auto_config if not username and not password and not auto_config and authmethod == 'BASIC': logger.debug('Username and password must be defined in configuration file with BASIC authentication method.') return False return pconn.register()
[ "def", "register", "(", "config", ",", "pconn", ")", ":", "username", "=", "config", ".", "username", "password", "=", "config", ".", "password", "authmethod", "=", "config", ".", "authmethod", "auto_config", "=", "config", ".", "auto_config", "if", "not", ...
Do registration using basic auth
[ "Do", "registration", "using", "basic", "auth" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/client.py#L93-L104
train
220,830
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.validate_gpg_sig
def validate_gpg_sig(self, path, sig=None): """ Validate the collection rules """ logger.debug("Verifying GPG signature of Insights configuration") if sig is None: sig = path + ".asc" command = ("/usr/bin/gpg --no-default-keyring " "--keyring " + constants.pub_gpg_path + " --verify " + sig + " " + path) if not six.PY3: command = command.encode('utf-8', 'ignore') args = shlex.split(command) logger.debug("Executing: %s", args) proc = Popen( args, shell=False, stdout=PIPE, stderr=STDOUT, close_fds=True) stdout, stderr = proc.communicate() logger.debug("STDOUT: %s", stdout) logger.debug("STDERR: %s", stderr) logger.debug("Status: %s", proc.returncode) if proc.returncode: logger.error("ERROR: Unable to validate GPG signature: %s", path) return False else: logger.debug("GPG signature verified") return True
python
def validate_gpg_sig(self, path, sig=None): """ Validate the collection rules """ logger.debug("Verifying GPG signature of Insights configuration") if sig is None: sig = path + ".asc" command = ("/usr/bin/gpg --no-default-keyring " "--keyring " + constants.pub_gpg_path + " --verify " + sig + " " + path) if not six.PY3: command = command.encode('utf-8', 'ignore') args = shlex.split(command) logger.debug("Executing: %s", args) proc = Popen( args, shell=False, stdout=PIPE, stderr=STDOUT, close_fds=True) stdout, stderr = proc.communicate() logger.debug("STDOUT: %s", stdout) logger.debug("STDERR: %s", stderr) logger.debug("Status: %s", proc.returncode) if proc.returncode: logger.error("ERROR: Unable to validate GPG signature: %s", path) return False else: logger.debug("GPG signature verified") return True
[ "def", "validate_gpg_sig", "(", "self", ",", "path", ",", "sig", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"Verifying GPG signature of Insights configuration\"", ")", "if", "sig", "is", "None", ":", "sig", "=", "path", "+", "\".asc\"", "command", ...
Validate the collection rules
[ "Validate", "the", "collection", "rules" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L46-L71
train
220,831
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.try_disk
def try_disk(self, path, gpg=True): """ Try to load json off disk """ if not os.path.isfile(path): return if not gpg or self.validate_gpg_sig(path): stream = open(path, 'r') json_stream = stream.read() if len(json_stream): try: json_config = json.loads(json_stream) return json_config except ValueError: logger.error("ERROR: Invalid JSON in %s", path) return False else: logger.warn("WARNING: %s was an empty file", path) return
python
def try_disk(self, path, gpg=True): """ Try to load json off disk """ if not os.path.isfile(path): return if not gpg or self.validate_gpg_sig(path): stream = open(path, 'r') json_stream = stream.read() if len(json_stream): try: json_config = json.loads(json_stream) return json_config except ValueError: logger.error("ERROR: Invalid JSON in %s", path) return False else: logger.warn("WARNING: %s was an empty file", path) return
[ "def", "try_disk", "(", "self", ",", "path", ",", "gpg", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "if", "not", "gpg", "or", "self", ".", "validate_gpg_sig", "(", "path", ")", ":", "str...
Try to load json off disk
[ "Try", "to", "load", "json", "off", "disk" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L73-L92
train
220,832
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.get_collection_rules
def get_collection_rules(self, raw=False): """ Download the collection rules """ logger.debug("Attemping to download collection rules from %s", self.collection_rules_url) net_logger.info("GET %s", self.collection_rules_url) try: req = self.conn.session.get( self.collection_rules_url, headers=({'accept': 'text/plain'})) if req.status_code == 200: logger.debug("Successfully downloaded collection rules") json_response = NamedTemporaryFile() json_response.write(req.text.encode('utf-8')) json_response.file.flush() else: logger.error("ERROR: Could not download dynamic configuration") logger.error("Debug Info: \nConf status: %s", req.status_code) logger.error("Debug Info: \nConf message: %s", req.text) return None except requests.ConnectionError as e: logger.error( "ERROR: Could not download dynamic configuration: %s", e) return None if self.gpg: self.get_collection_rules_gpg(json_response) self.write_collection_data(self.collection_rules_file, req.text) if raw: return req.text else: return json.loads(req.text)
python
def get_collection_rules(self, raw=False): """ Download the collection rules """ logger.debug("Attemping to download collection rules from %s", self.collection_rules_url) net_logger.info("GET %s", self.collection_rules_url) try: req = self.conn.session.get( self.collection_rules_url, headers=({'accept': 'text/plain'})) if req.status_code == 200: logger.debug("Successfully downloaded collection rules") json_response = NamedTemporaryFile() json_response.write(req.text.encode('utf-8')) json_response.file.flush() else: logger.error("ERROR: Could not download dynamic configuration") logger.error("Debug Info: \nConf status: %s", req.status_code) logger.error("Debug Info: \nConf message: %s", req.text) return None except requests.ConnectionError as e: logger.error( "ERROR: Could not download dynamic configuration: %s", e) return None if self.gpg: self.get_collection_rules_gpg(json_response) self.write_collection_data(self.collection_rules_file, req.text) if raw: return req.text else: return json.loads(req.text)
[ "def", "get_collection_rules", "(", "self", ",", "raw", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Attemping to download collection rules from %s\"", ",", "self", ".", "collection_rules_url", ")", "net_logger", ".", "info", "(", "\"GET %s\"", ",", "se...
Download the collection rules
[ "Download", "the", "collection", "rules" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L94-L130
train
220,833
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.get_collection_rules_gpg
def get_collection_rules_gpg(self, collection_rules): """ Download the collection rules gpg signature """ sig_text = self.fetch_gpg() sig_response = NamedTemporaryFile(suffix=".asc") sig_response.write(sig_text.encode('utf-8')) sig_response.file.flush() self.validate_gpg_sig(collection_rules.name, sig_response.name) self.write_collection_data(self.collection_rules_file + ".asc", sig_text)
python
def get_collection_rules_gpg(self, collection_rules): """ Download the collection rules gpg signature """ sig_text = self.fetch_gpg() sig_response = NamedTemporaryFile(suffix=".asc") sig_response.write(sig_text.encode('utf-8')) sig_response.file.flush() self.validate_gpg_sig(collection_rules.name, sig_response.name) self.write_collection_data(self.collection_rules_file + ".asc", sig_text)
[ "def", "get_collection_rules_gpg", "(", "self", ",", "collection_rules", ")", ":", "sig_text", "=", "self", ".", "fetch_gpg", "(", ")", "sig_response", "=", "NamedTemporaryFile", "(", "suffix", "=", "\".asc\"", ")", "sig_response", ".", "write", "(", "sig_text",...
Download the collection rules gpg signature
[ "Download", "the", "collection", "rules", "gpg", "signature" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L149-L158
train
220,834
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.write_collection_data
def write_collection_data(self, path, data): """ Write collections rules to disk """ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC fd = os.open(path, flags, 0o600) with os.fdopen(fd, 'w') as dyn_conf_file: dyn_conf_file.write(data)
python
def write_collection_data(self, path, data): """ Write collections rules to disk """ flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC fd = os.open(path, flags, 0o600) with os.fdopen(fd, 'w') as dyn_conf_file: dyn_conf_file.write(data)
[ "def", "write_collection_data", "(", "self", ",", "path", ",", "data", ")", ":", "flags", "=", "os", ".", "O_WRONLY", "|", "os", ".", "O_CREAT", "|", "os", ".", "O_TRUNC", "fd", "=", "os", ".", "open", "(", "path", ",", "flags", ",", "0o600", ")", ...
Write collections rules to disk
[ "Write", "collections", "rules", "to", "disk" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L160-L167
train
220,835
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.get_conf_file
def get_conf_file(self): """ Get config from local config file, first try cache, then fallback. """ for conf_file in [self.collection_rules_file, self.fallback_file]: logger.debug("trying to read conf from: " + conf_file) conf = self.try_disk(conf_file, self.gpg) if not conf: continue version = conf.get('version', None) if version is None: raise ValueError("ERROR: Could not find version in json") conf['file'] = conf_file logger.debug("Success reading config") logger.debug(json.dumps(conf)) return conf raise ValueError("ERROR: Unable to download conf or read it from disk!")
python
def get_conf_file(self): """ Get config from local config file, first try cache, then fallback. """ for conf_file in [self.collection_rules_file, self.fallback_file]: logger.debug("trying to read conf from: " + conf_file) conf = self.try_disk(conf_file, self.gpg) if not conf: continue version = conf.get('version', None) if version is None: raise ValueError("ERROR: Could not find version in json") conf['file'] = conf_file logger.debug("Success reading config") logger.debug(json.dumps(conf)) return conf raise ValueError("ERROR: Unable to download conf or read it from disk!")
[ "def", "get_conf_file", "(", "self", ")", ":", "for", "conf_file", "in", "[", "self", ".", "collection_rules_file", ",", "self", ".", "fallback_file", "]", ":", "logger", ".", "debug", "(", "\"trying to read conf from: \"", "+", "conf_file", ")", "conf", "=", ...
Get config from local config file, first try cache, then fallback.
[ "Get", "config", "from", "local", "config", "file", "first", "try", "cache", "then", "fallback", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L169-L189
train
220,836
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.get_conf_update
def get_conf_update(self): """ Get updated config from URL, fallback to local file if download fails. """ dyn_conf = self.get_collection_rules() if not dyn_conf: return self.get_conf_file() version = dyn_conf.get('version', None) if version is None: raise ValueError("ERROR: Could not find version in json") dyn_conf['file'] = self.collection_rules_file logger.debug("Success reading config") config_hash = hashlib.sha1(json.dumps(dyn_conf).encode('utf-8')).hexdigest() logger.debug('sha1 of config: %s', config_hash) return dyn_conf
python
def get_conf_update(self): """ Get updated config from URL, fallback to local file if download fails. """ dyn_conf = self.get_collection_rules() if not dyn_conf: return self.get_conf_file() version = dyn_conf.get('version', None) if version is None: raise ValueError("ERROR: Could not find version in json") dyn_conf['file'] = self.collection_rules_file logger.debug("Success reading config") config_hash = hashlib.sha1(json.dumps(dyn_conf).encode('utf-8')).hexdigest() logger.debug('sha1 of config: %s', config_hash) return dyn_conf
[ "def", "get_conf_update", "(", "self", ")", ":", "dyn_conf", "=", "self", ".", "get_collection_rules", "(", ")", "if", "not", "dyn_conf", ":", "return", "self", ".", "get_conf_file", "(", ")", "version", "=", "dyn_conf", ".", "get", "(", "'version'", ",", ...
Get updated config from URL, fallback to local file if download fails.
[ "Get", "updated", "config", "from", "URL", "fallback", "to", "local", "file", "if", "download", "fails", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L191-L208
train
220,837
RedHatInsights/insights-core
insights/client/collection_rules.py
InsightsUploadConf.get_rm_conf
def get_rm_conf(self): """ Get excluded files config from remove_file. """ if not os.path.isfile(self.remove_file): return None # Convert config object into dict parsedconfig = ConfigParser.RawConfigParser() parsedconfig.read(self.remove_file) rm_conf = {} for item, value in parsedconfig.items('remove'): if six.PY3: rm_conf[item] = value.strip().encode('utf-8').decode('unicode-escape').split(',') else: rm_conf[item] = value.strip().decode('string-escape').split(',') return rm_conf
python
def get_rm_conf(self): """ Get excluded files config from remove_file. """ if not os.path.isfile(self.remove_file): return None # Convert config object into dict parsedconfig = ConfigParser.RawConfigParser() parsedconfig.read(self.remove_file) rm_conf = {} for item, value in parsedconfig.items('remove'): if six.PY3: rm_conf[item] = value.strip().encode('utf-8').decode('unicode-escape').split(',') else: rm_conf[item] = value.strip().decode('string-escape').split(',') return rm_conf
[ "def", "get_rm_conf", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "self", ".", "remove_file", ")", ":", "return", "None", "# Convert config object into dict", "parsedconfig", "=", "ConfigParser", ".", "RawConfigParser", "(", ")",...
Get excluded files config from remove_file.
[ "Get", "excluded", "files", "config", "from", "remove_file", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/collection_rules.py#L210-L228
train
220,838
RedHatInsights/insights-core
insights/util/streams.py
stream
def stream(command, stdin=None, env=os.environ, timeout=None): """ Yields a generator of a command's output. For line oriented commands only. Args: command (str or list): a command without pipes. If it's not a list, ``shlex.split`` is applied. stdin (file like object): stream to use as the command's standard input. env (dict): The environment in which to execute the command. PATH should be defined. timeout (int): Amount of time in seconds to give the command to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the command. It should typically be wrapped in a ``reader``. """ if not isinstance(command, list): command = shlex.split(command) cmd = which(command[0]) if cmd is None: path = env.get("PATH", "") raise Exception("Command [%s] not in PATH [%s]" % (command[0], path)) command[0] = cmd if timeout: if not timeout_command[0]: raise Exception("Timeout specified but timeout command not available.") command = timeout_command + [str(timeout)] + command output = None try: output = Popen(command, env=env, stdin=stdin, **stream_options) yield output.stdout finally: if output: output.wait()
python
def stream(command, stdin=None, env=os.environ, timeout=None): """ Yields a generator of a command's output. For line oriented commands only. Args: command (str or list): a command without pipes. If it's not a list, ``shlex.split`` is applied. stdin (file like object): stream to use as the command's standard input. env (dict): The environment in which to execute the command. PATH should be defined. timeout (int): Amount of time in seconds to give the command to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the command. It should typically be wrapped in a ``reader``. """ if not isinstance(command, list): command = shlex.split(command) cmd = which(command[0]) if cmd is None: path = env.get("PATH", "") raise Exception("Command [%s] not in PATH [%s]" % (command[0], path)) command[0] = cmd if timeout: if not timeout_command[0]: raise Exception("Timeout specified but timeout command not available.") command = timeout_command + [str(timeout)] + command output = None try: output = Popen(command, env=env, stdin=stdin, **stream_options) yield output.stdout finally: if output: output.wait()
[ "def", "stream", "(", "command", ",", "stdin", "=", "None", ",", "env", "=", "os", ".", "environ", ",", "timeout", "=", "None", ")", ":", "if", "not", "isinstance", "(", "command", ",", "list", ")", ":", "command", "=", "shlex", ".", "split", "(", ...
Yields a generator of a command's output. For line oriented commands only. Args: command (str or list): a command without pipes. If it's not a list, ``shlex.split`` is applied. stdin (file like object): stream to use as the command's standard input. env (dict): The environment in which to execute the command. PATH should be defined. timeout (int): Amount of time in seconds to give the command to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the command. It should typically be wrapped in a ``reader``.
[ "Yields", "a", "generator", "of", "a", "command", "s", "output", ".", "For", "line", "oriented", "commands", "only", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/streams.py#L30-L68
train
220,839
RedHatInsights/insights-core
insights/util/streams.py
connect
def connect(*cmds, **kwargs): """ Connects multiple command streams together and yields the final stream. Args: cmds (list): list of commands to pipe together. Each command will be an input to ``stream``. stdin (file like object): stream to use as the first command's standard input. env (dict): The environment in which to execute the commands. PATH should be defined. timeout (int): Amount of time in seconds to give the pipeline to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the final command in the pipeline. It should typically be wrapped in a ``reader``. """ stdin = kwargs.get("stdin") env = kwargs.get("env", os.environ) timeout = kwargs.get("timeout") end = len(cmds) - 1 @contextmanager def inner(idx, inp): with stream(cmds[idx], stdin=inp, env=env, timeout=timeout) as s: if idx == end: yield s else: with inner(idx + 1, s) as c: yield c with inner(0, stdin) as s: yield s
python
def connect(*cmds, **kwargs): """ Connects multiple command streams together and yields the final stream. Args: cmds (list): list of commands to pipe together. Each command will be an input to ``stream``. stdin (file like object): stream to use as the first command's standard input. env (dict): The environment in which to execute the commands. PATH should be defined. timeout (int): Amount of time in seconds to give the pipeline to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the final command in the pipeline. It should typically be wrapped in a ``reader``. """ stdin = kwargs.get("stdin") env = kwargs.get("env", os.environ) timeout = kwargs.get("timeout") end = len(cmds) - 1 @contextmanager def inner(idx, inp): with stream(cmds[idx], stdin=inp, env=env, timeout=timeout) as s: if idx == end: yield s else: with inner(idx + 1, s) as c: yield c with inner(0, stdin) as s: yield s
[ "def", "connect", "(", "*", "cmds", ",", "*", "*", "kwargs", ")", ":", "stdin", "=", "kwargs", ".", "get", "(", "\"stdin\"", ")", "env", "=", "kwargs", ".", "get", "(", "\"env\"", ",", "os", ".", "environ", ")", "timeout", "=", "kwargs", ".", "ge...
Connects multiple command streams together and yields the final stream. Args: cmds (list): list of commands to pipe together. Each command will be an input to ``stream``. stdin (file like object): stream to use as the first command's standard input. env (dict): The environment in which to execute the commands. PATH should be defined. timeout (int): Amount of time in seconds to give the pipeline to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the final command in the pipeline. It should typically be wrapped in a ``reader``.
[ "Connects", "multiple", "command", "streams", "together", "and", "yields", "the", "final", "stream", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/streams.py#L72-L105
train
220,840
RedHatInsights/insights-core
insights/core/ls_parser.py
parse_non_selinux
def parse_non_selinux(parts): """ Parse part of an ls output line that isn't selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are link count, owner, group, and everything else. Returns: A dict containing links, owner, group, date, and name. If the line represented a device, major and minor numbers are included. Otherwise, size is included. If the raw name was a symbolic link, link is included. """ links, owner, group, last = parts result = { "links": int(links), "owner": owner, "group": group, } # device numbers only go to 256. # If a comma is in the first four characters, the next two elements are # major and minor device numbers. Otherwise, the next element is the size. if "," in last[:4]: major, minor, rest = last.split(None, 2) result["major"] = int(major.rstrip(",")) result["minor"] = int(minor) else: size, rest = last.split(None, 1) result["size"] = int(size) # The date part is always 12 characters regardless of content. result["date"] = rest[:12] # Jump over the date and the following space to get the path part. path, link = parse_path(rest[13:]) result["name"] = path if link: result["link"] = link return result
python
def parse_non_selinux(parts): """ Parse part of an ls output line that isn't selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are link count, owner, group, and everything else. Returns: A dict containing links, owner, group, date, and name. If the line represented a device, major and minor numbers are included. Otherwise, size is included. If the raw name was a symbolic link, link is included. """ links, owner, group, last = parts result = { "links": int(links), "owner": owner, "group": group, } # device numbers only go to 256. # If a comma is in the first four characters, the next two elements are # major and minor device numbers. Otherwise, the next element is the size. if "," in last[:4]: major, minor, rest = last.split(None, 2) result["major"] = int(major.rstrip(",")) result["minor"] = int(minor) else: size, rest = last.split(None, 1) result["size"] = int(size) # The date part is always 12 characters regardless of content. result["date"] = rest[:12] # Jump over the date and the following space to get the path part. path, link = parse_path(rest[13:]) result["name"] = path if link: result["link"] = link return result
[ "def", "parse_non_selinux", "(", "parts", ")", ":", "links", ",", "owner", ",", "group", ",", "last", "=", "parts", "result", "=", "{", "\"links\"", ":", "int", "(", "links", ")", ",", "\"owner\"", ":", "owner", ",", "\"group\"", ":", "group", ",", "...
Parse part of an ls output line that isn't selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are link count, owner, group, and everything else. Returns: A dict containing links, owner, group, date, and name. If the line represented a device, major and minor numbers are included. Otherwise, size is included. If the raw name was a symbolic link, link is included.
[ "Parse", "part", "of", "an", "ls", "output", "line", "that", "isn", "t", "selinux", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/ls_parser.py#L23-L65
train
220,841
RedHatInsights/insights-core
insights/core/ls_parser.py
parse_selinux
def parse_selinux(parts): """ Parse part of an ls output line that is selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included. """ owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_type": selinux[2] if lsel > 2 else None, "se_mls": selinux[3] if lsel > 3 else None, "name": path } if link: result["link"] = link return result
python
def parse_selinux(parts): """ Parse part of an ls output line that is selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included. """ owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_type": selinux[2] if lsel > 2 else None, "se_mls": selinux[3] if lsel > 3 else None, "name": path } if link: result["link"] = link return result
[ "def", "parse_selinux", "(", "parts", ")", ":", "owner", ",", "group", "=", "parts", "[", ":", "2", "]", "selinux", "=", "parts", "[", "2", "]", ".", "split", "(", "\":\"", ")", "lsel", "=", "len", "(", "selinux", ")", "path", ",", "link", "=", ...
Parse part of an ls output line that is selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included.
[ "Parse", "part", "of", "an", "ls", "output", "line", "that", "is", "selinux", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/ls_parser.py#L68-L98
train
220,842
RedHatInsights/insights-core
insights/core/ls_parser.py
parse
def parse(lines, root=None): """ Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza. """ doc = {} entries = [] name = None total = None for line in lines: line = line.strip() if not line: continue if line and line[0] == "/" and line[-1] == ":": if name is None: name = line[:-1] if entries: d = Directory(name, total or len(entries), entries) doc[root] = d total = None entries = [] else: d = Directory(name, total or len(entries), entries) doc[name or root] = d total = None entries = [] name = line[:-1] continue if line.startswith("total"): total = int(line.split(None, 1)[1]) continue entries.append(line) name = name or root doc[name] = Directory(name, total or len(entries), entries) return doc
python
def parse(lines, root=None): """ Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza. """ doc = {} entries = [] name = None total = None for line in lines: line = line.strip() if not line: continue if line and line[0] == "/" and line[-1] == ":": if name is None: name = line[:-1] if entries: d = Directory(name, total or len(entries), entries) doc[root] = d total = None entries = [] else: d = Directory(name, total or len(entries), entries) doc[name or root] = d total = None entries = [] name = line[:-1] continue if line.startswith("total"): total = int(line.split(None, 1)[1]) continue entries.append(line) name = name or root doc[name] = Directory(name, total or len(entries), entries) return doc
[ "def", "parse", "(", "lines", ",", "root", "=", "None", ")", ":", "doc", "=", "{", "}", "entries", "=", "[", "]", "name", "=", "None", "total", "=", "None", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "strip", "(", ")", "if", ...
Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza.
[ "Parses", "a", "list", "of", "lines", "from", "ls", "into", "dictionaries", "representing", "their", "components", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/ls_parser.py#L181-L224
train
220,843
RedHatInsights/insights-core
insights/parsers/__init__.py
get_active_lines
def get_active_lines(lines, comment_char="#"): """ Returns lines, or parts of lines, from content that are not commented out or completely empty. The resulting lines are all individually stripped. This is useful for parsing many config files such as ifcfg. Parameters: lines (list): List of strings to parse. comment_char (str): String indicating that all chars following are part of a comment and will be removed from the output. Returns: list: List of valid lines remaining in the input. Examples: >>> lines = [ ... 'First line', ... ' ', ... '# Comment line', ... 'Inline comment # comment', ... ' Whitespace ', ... 'Last line'] >>> get_active_lines(lines) ['First line', 'Inline comment', 'Whitespace', 'Last line'] """ return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
python
def get_active_lines(lines, comment_char="#"): """ Returns lines, or parts of lines, from content that are not commented out or completely empty. The resulting lines are all individually stripped. This is useful for parsing many config files such as ifcfg. Parameters: lines (list): List of strings to parse. comment_char (str): String indicating that all chars following are part of a comment and will be removed from the output. Returns: list: List of valid lines remaining in the input. Examples: >>> lines = [ ... 'First line', ... ' ', ... '# Comment line', ... 'Inline comment # comment', ... ' Whitespace ', ... 'Last line'] >>> get_active_lines(lines) ['First line', 'Inline comment', 'Whitespace', 'Last line'] """ return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
[ "def", "get_active_lines", "(", "lines", ",", "comment_char", "=", "\"#\"", ")", ":", "return", "list", "(", "filter", "(", "None", ",", "(", "line", ".", "split", "(", "comment_char", ",", "1", ")", "[", "0", "]", ".", "strip", "(", ")", "for", "l...
Returns lines, or parts of lines, from content that are not commented out or completely empty. The resulting lines are all individually stripped. This is useful for parsing many config files such as ifcfg. Parameters: lines (list): List of strings to parse. comment_char (str): String indicating that all chars following are part of a comment and will be removed from the output. Returns: list: List of valid lines remaining in the input. Examples: >>> lines = [ ... 'First line', ... ' ', ... '# Comment line', ... 'Inline comment # comment', ... ' Whitespace ', ... 'Last line'] >>> get_active_lines(lines) ['First line', 'Inline comment', 'Whitespace', 'Last line']
[ "Returns", "lines", "or", "parts", "of", "lines", "from", "content", "that", "are", "not", "commented", "out", "or", "completely", "empty", ".", "The", "resulting", "lines", "are", "all", "individually", "stripped", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L30-L56
train
220,844
RedHatInsights/insights-core
insights/parsers/__init__.py
optlist_to_dict
def optlist_to_dict(optlist, opt_sep=',', kv_sep='=', strip_quotes=False): """Parse an option list into a dictionary. Takes a list of options separated by ``opt_sep`` and places them into a dictionary with the default value of ``True``. If ``kv_sep`` option is specified then key/value options ``key=value`` are parsed. Useful for parsing options such as mount options in the format ``rw,ro,rsize=32168,xyz``. Parameters: optlist (str): String of options to parse. opt_sep (str): Separater used to split options. kv_sep (str): If not `None` then `optlist` includes key=value pairs to be split, and this str is used to split them. strip_quotes (bool): If set, will remove matching '"' and '"' characters from start and end of line. No quotes are removed from inside the string and mismatched quotes are not removed. Returns: dict: Returns a dictionary of names present in the list. If `kv_sep` is not `None` then the values will be the str on the right-hand side of `kv_sep`. If `kv_sep` is `None` then each key will have a default value of `True`. Examples: >>> optlist = 'rw,ro,rsize=32168,xyz' >>> optlist_to_dict(optlist) {'rw': True, 'ro': True, 'rsize': '32168', 'xyz': True} """ def make_kv(opt): if kv_sep is not None and kv_sep in opt: k, v = opt.split(kv_sep, 1) k = k.strip() if strip_quotes and v[0] in ('"', "'") and v[-1] == v[0]: return k, v[1:-1] else: return k, v else: return opt, True return dict(make_kv(opt) for opt in optlist.split(opt_sep))
python
def optlist_to_dict(optlist, opt_sep=',', kv_sep='=', strip_quotes=False): """Parse an option list into a dictionary. Takes a list of options separated by ``opt_sep`` and places them into a dictionary with the default value of ``True``. If ``kv_sep`` option is specified then key/value options ``key=value`` are parsed. Useful for parsing options such as mount options in the format ``rw,ro,rsize=32168,xyz``. Parameters: optlist (str): String of options to parse. opt_sep (str): Separater used to split options. kv_sep (str): If not `None` then `optlist` includes key=value pairs to be split, and this str is used to split them. strip_quotes (bool): If set, will remove matching '"' and '"' characters from start and end of line. No quotes are removed from inside the string and mismatched quotes are not removed. Returns: dict: Returns a dictionary of names present in the list. If `kv_sep` is not `None` then the values will be the str on the right-hand side of `kv_sep`. If `kv_sep` is `None` then each key will have a default value of `True`. Examples: >>> optlist = 'rw,ro,rsize=32168,xyz' >>> optlist_to_dict(optlist) {'rw': True, 'ro': True, 'rsize': '32168', 'xyz': True} """ def make_kv(opt): if kv_sep is not None and kv_sep in opt: k, v = opt.split(kv_sep, 1) k = k.strip() if strip_quotes and v[0] in ('"', "'") and v[-1] == v[0]: return k, v[1:-1] else: return k, v else: return opt, True return dict(make_kv(opt) for opt in optlist.split(opt_sep))
[ "def", "optlist_to_dict", "(", "optlist", ",", "opt_sep", "=", "','", ",", "kv_sep", "=", "'='", ",", "strip_quotes", "=", "False", ")", ":", "def", "make_kv", "(", "opt", ")", ":", "if", "kv_sep", "is", "not", "None", "and", "kv_sep", "in", "opt", "...
Parse an option list into a dictionary. Takes a list of options separated by ``opt_sep`` and places them into a dictionary with the default value of ``True``. If ``kv_sep`` option is specified then key/value options ``key=value`` are parsed. Useful for parsing options such as mount options in the format ``rw,ro,rsize=32168,xyz``. Parameters: optlist (str): String of options to parse. opt_sep (str): Separater used to split options. kv_sep (str): If not `None` then `optlist` includes key=value pairs to be split, and this str is used to split them. strip_quotes (bool): If set, will remove matching '"' and '"' characters from start and end of line. No quotes are removed from inside the string and mismatched quotes are not removed. Returns: dict: Returns a dictionary of names present in the list. If `kv_sep` is not `None` then the values will be the str on the right-hand side of `kv_sep`. If `kv_sep` is `None` then each key will have a default value of `True`. Examples: >>> optlist = 'rw,ro,rsize=32168,xyz' >>> optlist_to_dict(optlist) {'rw': True, 'ro': True, 'rsize': '32168', 'xyz': True}
[ "Parse", "an", "option", "list", "into", "a", "dictionary", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L59-L99
train
220,845
RedHatInsights/insights-core
insights/parsers/__init__.py
unsplit_lines
def unsplit_lines(lines, cont_char='\\', keep_cont_char=False): """Recombine lines having a continuation character at end. Generator that recombines lines in the list that have the char `cont_char` at the end of a line. If `cont_char` is found in a line then then next line will be appended to the current line, this will continue for multiple continuation lines until the next line is found with no continuation character at the end. All lines found will be combined and returned. If the `keep_cont_char` option is set to True, the continuation character will be left on the end of the line. Otherwise, by default, it is removed. Parameters: lines (list): List of strings to be evaluated. cont_char (char): Char to search for at end of line. Default is ``\\``. keep_cont_char (bool): Whether to keep the continuation on the end of the line. Defaults to False, which causes the continuation character to be removed. Yields: line (str): Yields unsplit lines Examples: >>> lines = ['Line one \\', ' line one part 2', 'Line two'] >>> list(unsplit_lines(lines)) ['Line one line one part 2', 'Line two'] >>> list(unsplit_lines(lines, cont_char='2')) ['Line one \\', ' line one part Line two'] >>> list(unsplit_lines(lines, keep_cont_char=True) ['Line one \ line one part 2', 'Line two'] """ unsplit_lines = [] for line in lines: line = line.rstrip() if line.endswith(cont_char): unsplit_lines.append(line if keep_cont_char else line[:-1]) else: yield ''.join(unsplit_lines) + line unsplit_lines = [] if unsplit_lines: yield ''.join(unsplit_lines)
python
def unsplit_lines(lines, cont_char='\\', keep_cont_char=False): """Recombine lines having a continuation character at end. Generator that recombines lines in the list that have the char `cont_char` at the end of a line. If `cont_char` is found in a line then then next line will be appended to the current line, this will continue for multiple continuation lines until the next line is found with no continuation character at the end. All lines found will be combined and returned. If the `keep_cont_char` option is set to True, the continuation character will be left on the end of the line. Otherwise, by default, it is removed. Parameters: lines (list): List of strings to be evaluated. cont_char (char): Char to search for at end of line. Default is ``\\``. keep_cont_char (bool): Whether to keep the continuation on the end of the line. Defaults to False, which causes the continuation character to be removed. Yields: line (str): Yields unsplit lines Examples: >>> lines = ['Line one \\', ' line one part 2', 'Line two'] >>> list(unsplit_lines(lines)) ['Line one line one part 2', 'Line two'] >>> list(unsplit_lines(lines, cont_char='2')) ['Line one \\', ' line one part Line two'] >>> list(unsplit_lines(lines, keep_cont_char=True) ['Line one \ line one part 2', 'Line two'] """ unsplit_lines = [] for line in lines: line = line.rstrip() if line.endswith(cont_char): unsplit_lines.append(line if keep_cont_char else line[:-1]) else: yield ''.join(unsplit_lines) + line unsplit_lines = [] if unsplit_lines: yield ''.join(unsplit_lines)
[ "def", "unsplit_lines", "(", "lines", ",", "cont_char", "=", "'\\\\'", ",", "keep_cont_char", "=", "False", ")", ":", "unsplit_lines", "=", "[", "]", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "line", ".", ...
Recombine lines having a continuation character at end. Generator that recombines lines in the list that have the char `cont_char` at the end of a line. If `cont_char` is found in a line then then next line will be appended to the current line, this will continue for multiple continuation lines until the next line is found with no continuation character at the end. All lines found will be combined and returned. If the `keep_cont_char` option is set to True, the continuation character will be left on the end of the line. Otherwise, by default, it is removed. Parameters: lines (list): List of strings to be evaluated. cont_char (char): Char to search for at end of line. Default is ``\\``. keep_cont_char (bool): Whether to keep the continuation on the end of the line. Defaults to False, which causes the continuation character to be removed. Yields: line (str): Yields unsplit lines Examples: >>> lines = ['Line one \\', ' line one part 2', 'Line two'] >>> list(unsplit_lines(lines)) ['Line one line one part 2', 'Line two'] >>> list(unsplit_lines(lines, cont_char='2')) ['Line one \\', ' line one part Line two'] >>> list(unsplit_lines(lines, keep_cont_char=True) ['Line one \ line one part 2', 'Line two']
[ "Recombine", "lines", "having", "a", "continuation", "character", "at", "end", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L179-L220
train
220,846
RedHatInsights/insights-core
insights/parsers/__init__.py
calc_offset
def calc_offset(lines, target, invert_search=False): """ Function to search for a line in a list starting with a target string. If `target` is `None` or an empty string then `0` is returned. This allows checking `target` here instead of having to check for an empty target in the calling function. Each line is stripped of leading spaces prior to comparison with each target however target is not stripped. See `parse_fixed_table` in this module for sample usage. Arguments: lines (list): List of strings. target (list): List of strings to search for at the beginning of any line in lines. invert_search (boolean): If `True` this flag causes the search to continue until the first line is found not matching anything in target. An empty line is implicitly included in target. Default is `False`. This would typically be used if trimming trailing lines off of a file by passing `reversed(lines)` as the `lines` argument. Returns: int: index into the `lines` indicating the location of `target`. If `target` is `None` or an empty string `0` is returned as the offset. If `invert_search` is `True` the index returned will point to the line after the last target was found. Raises: ValueError: Exception is raised if `target` string is specified and it was not found in the input lines. Examples: >>> lines = [ ... '# ', ... 'Warning line', ... 'Error line', ... ' data 1 line', ... 
' data 2 line'] >>> target = ['data'] >>> calc_offset(lines, target) 3 >>> target = ['#', 'Warning', 'Error'] >>> calc_offset(lines, target, invert_search=True) 3 """ if target and target[0] is not None: for offset, line in enumerate(l.strip() for l in lines): found_any = any([line.startswith(t) for t in target]) if not invert_search and found_any: return offset elif invert_search and not(line == '' or found_any): return offset # If we get here then we didn't find any of the targets raise ValueError("Line containing '{}' was not found in table".format(','.join(target))) else: # If no target then return index 0 return 0
python
def calc_offset(lines, target, invert_search=False): """ Function to search for a line in a list starting with a target string. If `target` is `None` or an empty string then `0` is returned. This allows checking `target` here instead of having to check for an empty target in the calling function. Each line is stripped of leading spaces prior to comparison with each target however target is not stripped. See `parse_fixed_table` in this module for sample usage. Arguments: lines (list): List of strings. target (list): List of strings to search for at the beginning of any line in lines. invert_search (boolean): If `True` this flag causes the search to continue until the first line is found not matching anything in target. An empty line is implicitly included in target. Default is `False`. This would typically be used if trimming trailing lines off of a file by passing `reversed(lines)` as the `lines` argument. Returns: int: index into the `lines` indicating the location of `target`. If `target` is `None` or an empty string `0` is returned as the offset. If `invert_search` is `True` the index returned will point to the line after the last target was found. Raises: ValueError: Exception is raised if `target` string is specified and it was not found in the input lines. Examples: >>> lines = [ ... '# ', ... 'Warning line', ... 'Error line', ... ' data 1 line', ... 
' data 2 line'] >>> target = ['data'] >>> calc_offset(lines, target) 3 >>> target = ['#', 'Warning', 'Error'] >>> calc_offset(lines, target, invert_search=True) 3 """ if target and target[0] is not None: for offset, line in enumerate(l.strip() for l in lines): found_any = any([line.startswith(t) for t in target]) if not invert_search and found_any: return offset elif invert_search and not(line == '' or found_any): return offset # If we get here then we didn't find any of the targets raise ValueError("Line containing '{}' was not found in table".format(','.join(target))) else: # If no target then return index 0 return 0
[ "def", "calc_offset", "(", "lines", ",", "target", ",", "invert_search", "=", "False", ")", ":", "if", "target", "and", "target", "[", "0", "]", "is", "not", "None", ":", "for", "offset", ",", "line", "in", "enumerate", "(", "l", ".", "strip", "(", ...
Function to search for a line in a list starting with a target string. If `target` is `None` or an empty string then `0` is returned. This allows checking `target` here instead of having to check for an empty target in the calling function. Each line is stripped of leading spaces prior to comparison with each target however target is not stripped. See `parse_fixed_table` in this module for sample usage. Arguments: lines (list): List of strings. target (list): List of strings to search for at the beginning of any line in lines. invert_search (boolean): If `True` this flag causes the search to continue until the first line is found not matching anything in target. An empty line is implicitly included in target. Default is `False`. This would typically be used if trimming trailing lines off of a file by passing `reversed(lines)` as the `lines` argument. Returns: int: index into the `lines` indicating the location of `target`. If `target` is `None` or an empty string `0` is returned as the offset. If `invert_search` is `True` the index returned will point to the line after the last target was found. Raises: ValueError: Exception is raised if `target` string is specified and it was not found in the input lines. Examples: >>> lines = [ ... '# ', ... 'Warning line', ... 'Error line', ... ' data 1 line', ... ' data 2 line'] >>> target = ['data'] >>> calc_offset(lines, target) 3 >>> target = ['#', 'Warning', 'Error'] >>> calc_offset(lines, target, invert_search=True) 3
[ "Function", "to", "search", "for", "a", "line", "in", "a", "list", "starting", "with", "a", "target", "string", ".", "If", "target", "is", "None", "or", "an", "empty", "string", "then", "0", "is", "returned", ".", "This", "allows", "checking", "target", ...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L223-L278
train
220,847
RedHatInsights/insights-core
insights/parsers/__init__.py
parse_fixed_table
def parse_fixed_table(table_lines, heading_ignore=[], header_substitute=[], trailing_ignore=[]): """ Function to parse table data containing column headings in the first row and data in fixed positions in each remaining row of table data. Table columns must not contain spaces within the column name. Column headings are assumed to be left justified and the column data width is the width of the heading label plus all whitespace to the right of the label. This function will handle blank columns. Arguments: table_lines (list): List of strings with the first line containing column headings separated by spaces, and the remaining lines containing table data in left justified format. heading_ignore (list): Optional list of strings to search for at beginning of line. All lines before this line will be ignored. If specified then it must be present in the file or `ValueError` will be raised. header_substitute (list): Optional list of tuples containing `(old_string_value, new_string_value)` to be used to modify header values. If whitespace is present in a column it must be replaced with non-whitespace characters in order for the table to be parsed correctly. trailing_ignore (list): Optional list of strings to look for at the end rows of the content. Lines starting with these strings will be ignored, thereby truncating the rows of data. Returns: list: Returns a list of dict for each row of column data. Dict keys are the column headings in the same case as input. Raises: ValueError: Raised if `heading_ignore` is specified and not found in `table_lines`. 
Sample input:: Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 Examples: >>> table_data = parse_fixed_table(table_lines) >>> table_data [{'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}, {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}] """ def calc_column_indices(line, headers): idx = [] for h in headers: i = idx[-1] + 1 if idx else 0 idx.append(line.index(h, i)) return idx first_line = calc_offset(table_lines, heading_ignore) try: last_line = len(table_lines) - calc_offset(reversed(table_lines), trailing_ignore, invert_search=True) except ValueError: last_line = len(table_lines) header = table_lines[first_line] if header_substitute: for old_val, new_val in header_substitute: header = header.replace(old_val, new_val) col_headers = header.strip().split() col_index = calc_column_indices(header, col_headers) table_data = [] for line in table_lines[first_line + 1:last_line]: col_data = dict( (col_headers[c], line[col_index[c]:col_index[c + 1]].strip()) for c in range(len(col_index) - 1) ) col_data[col_headers[-1]] = line[col_index[-1]:].strip() table_data.append(col_data) return table_data
python
def parse_fixed_table(table_lines, heading_ignore=[], header_substitute=[], trailing_ignore=[]): """ Function to parse table data containing column headings in the first row and data in fixed positions in each remaining row of table data. Table columns must not contain spaces within the column name. Column headings are assumed to be left justified and the column data width is the width of the heading label plus all whitespace to the right of the label. This function will handle blank columns. Arguments: table_lines (list): List of strings with the first line containing column headings separated by spaces, and the remaining lines containing table data in left justified format. heading_ignore (list): Optional list of strings to search for at beginning of line. All lines before this line will be ignored. If specified then it must be present in the file or `ValueError` will be raised. header_substitute (list): Optional list of tuples containing `(old_string_value, new_string_value)` to be used to modify header values. If whitespace is present in a column it must be replaced with non-whitespace characters in order for the table to be parsed correctly. trailing_ignore (list): Optional list of strings to look for at the end rows of the content. Lines starting with these strings will be ignored, thereby truncating the rows of data. Returns: list: Returns a list of dict for each row of column data. Dict keys are the column headings in the same case as input. Raises: ValueError: Raised if `heading_ignore` is specified and not found in `table_lines`. 
Sample input:: Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 Examples: >>> table_data = parse_fixed_table(table_lines) >>> table_data [{'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}, {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}] """ def calc_column_indices(line, headers): idx = [] for h in headers: i = idx[-1] + 1 if idx else 0 idx.append(line.index(h, i)) return idx first_line = calc_offset(table_lines, heading_ignore) try: last_line = len(table_lines) - calc_offset(reversed(table_lines), trailing_ignore, invert_search=True) except ValueError: last_line = len(table_lines) header = table_lines[first_line] if header_substitute: for old_val, new_val in header_substitute: header = header.replace(old_val, new_val) col_headers = header.strip().split() col_index = calc_column_indices(header, col_headers) table_data = [] for line in table_lines[first_line + 1:last_line]: col_data = dict( (col_headers[c], line[col_index[c]:col_index[c + 1]].strip()) for c in range(len(col_index) - 1) ) col_data[col_headers[-1]] = line[col_index[-1]:].strip() table_data.append(col_data) return table_data
[ "def", "parse_fixed_table", "(", "table_lines", ",", "heading_ignore", "=", "[", "]", ",", "header_substitute", "=", "[", "]", ",", "trailing_ignore", "=", "[", "]", ")", ":", "def", "calc_column_indices", "(", "line", ",", "headers", ")", ":", "idx", "=",...
Function to parse table data containing column headings in the first row and data in fixed positions in each remaining row of table data. Table columns must not contain spaces within the column name. Column headings are assumed to be left justified and the column data width is the width of the heading label plus all whitespace to the right of the label. This function will handle blank columns. Arguments: table_lines (list): List of strings with the first line containing column headings separated by spaces, and the remaining lines containing table data in left justified format. heading_ignore (list): Optional list of strings to search for at beginning of line. All lines before this line will be ignored. If specified then it must be present in the file or `ValueError` will be raised. header_substitute (list): Optional list of tuples containing `(old_string_value, new_string_value)` to be used to modify header values. If whitespace is present in a column it must be replaced with non-whitespace characters in order for the table to be parsed correctly. trailing_ignore (list): Optional list of strings to look for at the end rows of the content. Lines starting with these strings will be ignored, thereby truncating the rows of data. Returns: list: Returns a list of dict for each row of column data. Dict keys are the column headings in the same case as input. Raises: ValueError: Raised if `heading_ignore` is specified and not found in `table_lines`. Sample input:: Column1 Column2 Column3 data1 data 2 data 3 data4 data5 data6 Examples: >>> table_data = parse_fixed_table(table_lines) >>> table_data [{'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}, {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}]
[ "Function", "to", "parse", "table", "data", "containing", "column", "headings", "in", "the", "first", "row", "and", "data", "in", "fixed", "positions", "in", "each", "remaining", "row", "of", "table", "data", ".", "Table", "columns", "must", "not", "contain"...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L281-L359
train
220,848
RedHatInsights/insights-core
insights/parsers/__init__.py
keyword_search
def keyword_search(rows, **kwargs): """ Takes a list of dictionaries and finds all the dictionaries where the keys and values match those found in the keyword arguments. Keys in the row data have ' ' and '-' replaced with '_', so they can match the keyword argument parsing. For example, the keyword argument 'fix_up_path' will match a key named 'fix-up path'. In addition, several suffixes can be added to the key name to do partial matching of values: * '__contains' will test whether the data value contains the given value. * '__startswith' tests if the data value starts with the given value * '__lower_value' compares the lower-case version of the data and given values. Arguments: rows (list): A list of dictionaries representing the data to be searched. **kwargs (dict): keyword-value pairs corresponding to the fields that need to be found and their required values in the data rows. Returns: (list): The list of rows that match the search keywords. If no keyword arguments are given, no rows are returned. Examples: >>> rows = [ ... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536}, ... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, ... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] ... 
>>> keyword_search(rows, domain='root') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, item__contains='c') [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, domain__startswith='r') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] """ results = [] if not kwargs: return results # Allows us to transform the key and do lookups like __contains and # __startswith matchers = { 'default': lambda s, v: s == v, 'contains': lambda s, v: v in s, 'startswith': lambda s, v: s.startswith(v), 'lower_value': lambda s, v: s.lower() == v.lower(), } def key_match(row, key, value): # Translate ' ' and '-' of keys in dict to '_' to match keyword arguments. my_row = {} for my_key, val in row.items(): my_row[my_key.replace(' ', '_').replace('-', '_')] = val matcher_fn = matchers['default'] if '__' in key: key, matcher = key.split('__', 1) if matcher not in matchers: # put key back the way we found it, matcher fn unchanged key = key + '__' + matcher else: matcher_fn = matchers[matcher] return key in my_row and matcher_fn(my_row[key], value) data = [] for row in rows: if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())): data.append(row) return data
python
def keyword_search(rows, **kwargs): """ Takes a list of dictionaries and finds all the dictionaries where the keys and values match those found in the keyword arguments. Keys in the row data have ' ' and '-' replaced with '_', so they can match the keyword argument parsing. For example, the keyword argument 'fix_up_path' will match a key named 'fix-up path'. In addition, several suffixes can be added to the key name to do partial matching of values: * '__contains' will test whether the data value contains the given value. * '__startswith' tests if the data value starts with the given value * '__lower_value' compares the lower-case version of the data and given values. Arguments: rows (list): A list of dictionaries representing the data to be searched. **kwargs (dict): keyword-value pairs corresponding to the fields that need to be found and their required values in the data rows. Returns: (list): The list of rows that match the search keywords. If no keyword arguments are given, no rows are returned. Examples: >>> rows = [ ... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536}, ... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, ... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] ... 
>>> keyword_search(rows, domain='root') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, item__contains='c') [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, domain__startswith='r') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] """ results = [] if not kwargs: return results # Allows us to transform the key and do lookups like __contains and # __startswith matchers = { 'default': lambda s, v: s == v, 'contains': lambda s, v: v in s, 'startswith': lambda s, v: s.startswith(v), 'lower_value': lambda s, v: s.lower() == v.lower(), } def key_match(row, key, value): # Translate ' ' and '-' of keys in dict to '_' to match keyword arguments. my_row = {} for my_key, val in row.items(): my_row[my_key.replace(' ', '_').replace('-', '_')] = val matcher_fn = matchers['default'] if '__' in key: key, matcher = key.split('__', 1) if matcher not in matchers: # put key back the way we found it, matcher fn unchanged key = key + '__' + matcher else: matcher_fn = matchers[matcher] return key in my_row and matcher_fn(my_row[key], value) data = [] for row in rows: if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())): data.append(row) return data
[ "def", "keyword_search", "(", "rows", ",", "*", "*", "kwargs", ")", ":", "results", "=", "[", "]", "if", "not", "kwargs", ":", "return", "results", "# Allows us to transform the key and do lookups like __contains and", "# __startswith", "matchers", "=", "{", "'defau...
Takes a list of dictionaries and finds all the dictionaries where the keys and values match those found in the keyword arguments. Keys in the row data have ' ' and '-' replaced with '_', so they can match the keyword argument parsing. For example, the keyword argument 'fix_up_path' will match a key named 'fix-up path'. In addition, several suffixes can be added to the key name to do partial matching of values: * '__contains' will test whether the data value contains the given value. * '__startswith' tests if the data value starts with the given value * '__lower_value' compares the lower-case version of the data and given values. Arguments: rows (list): A list of dictionaries representing the data to be searched. **kwargs (dict): keyword-value pairs corresponding to the fields that need to be found and their required values in the data rows. Returns: (list): The list of rows that match the search keywords. If no keyword arguments are given, no rows are returned. Examples: >>> rows = [ ... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536}, ... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, ... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, ... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] ... >>> keyword_search(rows, domain='root') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, item__contains='c') [{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240}, {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276}, {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}] >>> keyword_search(rows, domain__startswith='r') [{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
[ "Takes", "a", "list", "of", "dictionaries", "and", "finds", "all", "the", "dictionaries", "where", "the", "keys", "and", "values", "match", "those", "found", "in", "the", "keyword", "arguments", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/__init__.py#L451-L528
train
220,849
RedHatInsights/insights-core
insights/formats/_markdown.py
MarkdownFormat.count_exceptions
def count_exceptions(self, c, broker): """ Count exceptions as processing proceeds """ if c in broker.exceptions: self.counts['exception'] += len(broker.exceptions[c]) return self
python
def count_exceptions(self, c, broker): """ Count exceptions as processing proceeds """ if c in broker.exceptions: self.counts['exception'] += len(broker.exceptions[c]) return self
[ "def", "count_exceptions", "(", "self", ",", "c", ",", "broker", ")", ":", "if", "c", "in", "broker", ".", "exceptions", ":", "self", ".", "counts", "[", "'exception'", "]", "+=", "len", "(", "broker", ".", "exceptions", "[", "c", "]", ")", "return",...
Count exceptions as processing proceeds
[ "Count", "exceptions", "as", "processing", "proceeds" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/formats/_markdown.py#L74-L80
train
220,850
RedHatInsights/insights-core
examples/cluster_rules/bash_version.py
bash_rule
def bash_rule(bash, hostnames): """ Cluster rule to process bash and hostname info ``bash`` and ``hostnames`` are Pandas DataFrames for the facts collected for each host in the cluster. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. Arguments: bash (pandas.DataFrame): Includes facts from ``bash_version`` fact with columns "name" and "version" and one row per host in the cluster. hostnames (pandas.DataFrame): Includes facts from ``get_hostname`` fact with column "hostname" and one row per host in the cluster. """ if isinstance(bash, dict): return make_fail('bash_rule', error_message="Run this rule with a cluster archive") return make_pass('bash_rule', bash=bash, hostname=hostnames)
python
def bash_rule(bash, hostnames): """ Cluster rule to process bash and hostname info ``bash`` and ``hostnames`` are Pandas DataFrames for the facts collected for each host in the cluster. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. Arguments: bash (pandas.DataFrame): Includes facts from ``bash_version`` fact with columns "name" and "version" and one row per host in the cluster. hostnames (pandas.DataFrame): Includes facts from ``get_hostname`` fact with column "hostname" and one row per host in the cluster. """ if isinstance(bash, dict): return make_fail('bash_rule', error_message="Run this rule with a cluster archive") return make_pass('bash_rule', bash=bash, hostname=hostnames)
[ "def", "bash_rule", "(", "bash", ",", "hostnames", ")", ":", "if", "isinstance", "(", "bash", ",", "dict", ")", ":", "return", "make_fail", "(", "'bash_rule'", ",", "error_message", "=", "\"Run this rule with a cluster archive\"", ")", "return", "make_pass", "("...
Cluster rule to process bash and hostname info ``bash`` and ``hostnames`` are Pandas DataFrames for the facts collected for each host in the cluster. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. Arguments: bash (pandas.DataFrame): Includes facts from ``bash_version`` fact with columns "name" and "version" and one row per host in the cluster. hostnames (pandas.DataFrame): Includes facts from ``get_hostname`` fact with column "hostname" and one row per host in the cluster.
[ "Cluster", "rule", "to", "process", "bash", "and", "hostname", "info" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/cluster_rules/bash_version.py#L37-L58
train
220,851
RedHatInsights/insights-core
insights/core/marshalling.py
Marshaller.marshal
def marshal(self, o, use_value_list=False): """ Packages the return from a parser for easy use in a rule. """ if o is None: return elif isinstance(o, dict): if use_value_list: for k, v in o.items(): o[k] = [v] return o elif isinstance(o, six.string_types): if use_value_list: return {o: [True]} else: return {o: True} else: raise TypeError("Marshaller doesn't support given type %s" % type(o))
python
def marshal(self, o, use_value_list=False): """ Packages the return from a parser for easy use in a rule. """ if o is None: return elif isinstance(o, dict): if use_value_list: for k, v in o.items(): o[k] = [v] return o elif isinstance(o, six.string_types): if use_value_list: return {o: [True]} else: return {o: True} else: raise TypeError("Marshaller doesn't support given type %s" % type(o))
[ "def", "marshal", "(", "self", ",", "o", ",", "use_value_list", "=", "False", ")", ":", "if", "o", "is", "None", ":", "return", "elif", "isinstance", "(", "o", ",", "dict", ")", ":", "if", "use_value_list", ":", "for", "k", ",", "v", "in", "o", "...
Packages the return from a parser for easy use in a rule.
[ "Packages", "the", "return", "from", "a", "parser", "for", "easy", "use", "in", "a", "rule", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/marshalling.py#L19-L37
train
220,852
RedHatInsights/insights-core
insights/collect.py
load_manifest
def load_manifest(data): """ Helper for loading a manifest yaml doc. """ if isinstance(data, dict): return data doc = yaml.safe_load(data) if not isinstance(doc, dict): raise Exception("Manifest didn't result in dict.") return doc
python
def load_manifest(data): """ Helper for loading a manifest yaml doc. """ if isinstance(data, dict): return data doc = yaml.safe_load(data) if not isinstance(doc, dict): raise Exception("Manifest didn't result in dict.") return doc
[ "def", "load_manifest", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "data", "doc", "=", "yaml", ".", "safe_load", "(", "data", ")", "if", "not", "isinstance", "(", "doc", ",", "dict", ")", ":", "raise", ...
Helper for loading a manifest yaml doc.
[ "Helper", "for", "loading", "a", "manifest", "yaml", "doc", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/collect.py#L112-L119
train
220,853
RedHatInsights/insights-core
insights/collect.py
create_context
def create_context(ctx): """ Loads and constructs the specified context with the specified arguments. If a '.' isn't in the class name, the 'insights.core.context' package is assumed. """ ctx_cls_name = ctx.get("class", "insights.core.context.HostContext") if "." not in ctx_cls_name: ctx_cls_name = "insights.core.context." + ctx_cls_name ctx_cls = dr.get_component(ctx_cls_name) ctx_args = ctx.get("args", {}) return ctx_cls(**ctx_args)
python
def create_context(ctx): """ Loads and constructs the specified context with the specified arguments. If a '.' isn't in the class name, the 'insights.core.context' package is assumed. """ ctx_cls_name = ctx.get("class", "insights.core.context.HostContext") if "." not in ctx_cls_name: ctx_cls_name = "insights.core.context." + ctx_cls_name ctx_cls = dr.get_component(ctx_cls_name) ctx_args = ctx.get("args", {}) return ctx_cls(**ctx_args)
[ "def", "create_context", "(", "ctx", ")", ":", "ctx_cls_name", "=", "ctx", ".", "get", "(", "\"class\"", ",", "\"insights.core.context.HostContext\"", ")", "if", "\".\"", "not", "in", "ctx_cls_name", ":", "ctx_cls_name", "=", "\"insights.core.context.\"", "+", "ct...
Loads and constructs the specified context with the specified arguments. If a '.' isn't in the class name, the 'insights.core.context' package is assumed.
[ "Loads", "and", "constructs", "the", "specified", "context", "with", "the", "specified", "arguments", ".", "If", "a", ".", "isn", "t", "in", "the", "class", "name", "the", "insights", ".", "core", ".", "context", "package", "is", "assumed", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/collect.py#L141-L152
train
220,854
RedHatInsights/insights-core
insights/collect.py
get_to_persist
def get_to_persist(persisters): """ Given a specification of what to persist, generates the corresponding set of components. """ def specs(): for p in persisters: if isinstance(p, dict): yield p["name"], p.get("enabled", True) else: yield p, True components = sorted(dr.DELEGATES, key=dr.get_name) names = dict((c, dr.get_name(c)) for c in components) results = set() for p, e in specs(): for c in components: if names[c].startswith(p): if e: results.add(c) elif c in results: results.remove(c) return results
python
def get_to_persist(persisters): """ Given a specification of what to persist, generates the corresponding set of components. """ def specs(): for p in persisters: if isinstance(p, dict): yield p["name"], p.get("enabled", True) else: yield p, True components = sorted(dr.DELEGATES, key=dr.get_name) names = dict((c, dr.get_name(c)) for c in components) results = set() for p, e in specs(): for c in components: if names[c].startswith(p): if e: results.add(c) elif c in results: results.remove(c) return results
[ "def", "get_to_persist", "(", "persisters", ")", ":", "def", "specs", "(", ")", ":", "for", "p", "in", "persisters", ":", "if", "isinstance", "(", "p", ",", "dict", ")", ":", "yield", "p", "[", "\"name\"", "]", ",", "p", ".", "get", "(", "\"enabled...
Given a specification of what to persist, generates the corresponding set of components.
[ "Given", "a", "specification", "of", "what", "to", "persist", "generates", "the", "corresponding", "set", "of", "components", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/collect.py#L155-L178
train
220,855
RedHatInsights/insights-core
insights/collect.py
create_archive
def create_archive(path, remove_path=True): """ Creates a tar.gz of the path using the path basename + "tar.gz" The resulting file is in the parent directory of the original path, and the original path is removed. """ root_path = os.path.dirname(path) relative_path = os.path.basename(path) archive_path = path + ".tar.gz" cmd = [["tar", "-C", root_path, "-czf", archive_path, relative_path]] call(cmd, env=SAFE_ENV) if remove_path: fs.remove(path) return archive_path
python
def create_archive(path, remove_path=True): """ Creates a tar.gz of the path using the path basename + "tar.gz" The resulting file is in the parent directory of the original path, and the original path is removed. """ root_path = os.path.dirname(path) relative_path = os.path.basename(path) archive_path = path + ".tar.gz" cmd = [["tar", "-C", root_path, "-czf", archive_path, relative_path]] call(cmd, env=SAFE_ENV) if remove_path: fs.remove(path) return archive_path
[ "def", "create_archive", "(", "path", ",", "remove_path", "=", "True", ")", ":", "root_path", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "relative_path", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "archive_path", "=", "pa...
Creates a tar.gz of the path using the path basename + "tar.gz" The resulting file is in the parent directory of the original path, and the original path is removed.
[ "Creates", "a", "tar", ".", "gz", "of", "the", "path", "using", "the", "path", "basename", "+", "tar", ".", "gz", "The", "resulting", "file", "is", "in", "the", "parent", "directory", "of", "the", "original", "path", "and", "the", "original", "path", "...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/collect.py#L181-L195
train
220,856
RedHatInsights/insights-core
insights/collect.py
collect
def collect(manifest=default_manifest, tmp_path=None, compress=False): """ This is the collection entry point. It accepts a manifest, a temporary directory in which to store output, and a boolean for optional compression. Args: manifest (str or dict): json document or dictionary containing the collection manifest. See default_manifest for an example. tmp_path (str): The temporary directory that will be used to create a working directory for storing component output as well as the final tar.gz if one is generated. compress (boolean): True to create a tar.gz and remove the original workspace containing output. False to leave the workspace without creating a tar.gz Returns: The full path to the created tar.gz or workspace. """ manifest = load_manifest(manifest) client = manifest.get("client", {}) plugins = manifest.get("plugins", {}) run_strategy = client.get("run_strategy", {"name": "parallel"}) apply_default_enabled(plugins.get("default_component_enabled", False)) load_packages(plugins.get("packages", [])) apply_blacklist(client.get("blacklist", {})) apply_configs(plugins) to_persist = get_to_persist(client.get("persist", set())) hostname = call("hostname -f", env=SAFE_ENV).strip() suffix = datetime.utcnow().strftime("%Y%m%d%H%M%S") relative_path = "insights-%s-%s" % (hostname, suffix) tmp_path = tmp_path or tempfile.gettempdir() output_path = os.path.join(tmp_path, relative_path) fs.ensure_path(output_path) fs.touch(os.path.join(output_path, "insights_archive.txt")) broker = dr.Broker() ctx = create_context(client.get("context", {})) broker[ctx.__class__] = ctx parallel = run_strategy.get("name") == "parallel" pool_args = run_strategy.get("args", {}) with get_pool(parallel, pool_args) as pool: h = Hydration(output_path, pool=pool) broker.add_observer(h.make_persister(to_persist)) dr.run_all(broker=broker, pool=pool) if compress: return create_archive(output_path) return output_path
python
def collect(manifest=default_manifest, tmp_path=None, compress=False): """ This is the collection entry point. It accepts a manifest, a temporary directory in which to store output, and a boolean for optional compression. Args: manifest (str or dict): json document or dictionary containing the collection manifest. See default_manifest for an example. tmp_path (str): The temporary directory that will be used to create a working directory for storing component output as well as the final tar.gz if one is generated. compress (boolean): True to create a tar.gz and remove the original workspace containing output. False to leave the workspace without creating a tar.gz Returns: The full path to the created tar.gz or workspace. """ manifest = load_manifest(manifest) client = manifest.get("client", {}) plugins = manifest.get("plugins", {}) run_strategy = client.get("run_strategy", {"name": "parallel"}) apply_default_enabled(plugins.get("default_component_enabled", False)) load_packages(plugins.get("packages", [])) apply_blacklist(client.get("blacklist", {})) apply_configs(plugins) to_persist = get_to_persist(client.get("persist", set())) hostname = call("hostname -f", env=SAFE_ENV).strip() suffix = datetime.utcnow().strftime("%Y%m%d%H%M%S") relative_path = "insights-%s-%s" % (hostname, suffix) tmp_path = tmp_path or tempfile.gettempdir() output_path = os.path.join(tmp_path, relative_path) fs.ensure_path(output_path) fs.touch(os.path.join(output_path, "insights_archive.txt")) broker = dr.Broker() ctx = create_context(client.get("context", {})) broker[ctx.__class__] = ctx parallel = run_strategy.get("name") == "parallel" pool_args = run_strategy.get("args", {}) with get_pool(parallel, pool_args) as pool: h = Hydration(output_path, pool=pool) broker.add_observer(h.make_persister(to_persist)) dr.run_all(broker=broker, pool=pool) if compress: return create_archive(output_path) return output_path
[ "def", "collect", "(", "manifest", "=", "default_manifest", ",", "tmp_path", "=", "None", ",", "compress", "=", "False", ")", ":", "manifest", "=", "load_manifest", "(", "manifest", ")", "client", "=", "manifest", ".", "get", "(", "\"client\"", ",", "{", ...
This is the collection entry point. It accepts a manifest, a temporary directory in which to store output, and a boolean for optional compression. Args: manifest (str or dict): json document or dictionary containing the collection manifest. See default_manifest for an example. tmp_path (str): The temporary directory that will be used to create a working directory for storing component output as well as the final tar.gz if one is generated. compress (boolean): True to create a tar.gz and remove the original workspace containing output. False to leave the workspace without creating a tar.gz Returns: The full path to the created tar.gz or workspace.
[ "This", "is", "the", "collection", "entry", "point", ".", "It", "accepts", "a", "manifest", "a", "temporary", "directory", "in", "which", "to", "store", "output", "and", "a", "boolean", "for", "optional", "compression", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/collect.py#L217-L269
train
220,857
RedHatInsights/insights-core
insights/__init__.py
_run
def _run(broker, graph=None, root=None, context=None, inventory=None): """ run is a general interface that is meant for stand alone scripts to use when executing insights components. Args: root (str): None will causes a host collection in which command and file specs are run. A directory or archive path will cause collection from the directory or archive, and only file type specs or those that depend on `insights.core.context.HostArchiveContext` will execute. component (function or class): The component to execute. Will only execute the component and its dependency graph. If None, all components with met dependencies will execute. Returns: broker: object containing the result of the evaluation. """ if not root: context = context or HostContext broker[context] = context() return dr.run(graph, broker=broker) if os.path.isdir(root): return process_dir(broker, root, graph, context, inventory=inventory) else: with extract(root) as ex: return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)
python
def _run(broker, graph=None, root=None, context=None, inventory=None): """ run is a general interface that is meant for stand alone scripts to use when executing insights components. Args: root (str): None will causes a host collection in which command and file specs are run. A directory or archive path will cause collection from the directory or archive, and only file type specs or those that depend on `insights.core.context.HostArchiveContext` will execute. component (function or class): The component to execute. Will only execute the component and its dependency graph. If None, all components with met dependencies will execute. Returns: broker: object containing the result of the evaluation. """ if not root: context = context or HostContext broker[context] = context() return dr.run(graph, broker=broker) if os.path.isdir(root): return process_dir(broker, root, graph, context, inventory=inventory) else: with extract(root) as ex: return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory)
[ "def", "_run", "(", "broker", ",", "graph", "=", "None", ",", "root", "=", "None", ",", "context", "=", "None", ",", "inventory", "=", "None", ")", ":", "if", "not", "root", ":", "context", "=", "context", "or", "HostContext", "broker", "[", "context...
run is a general interface that is meant for stand alone scripts to use when executing insights components. Args: root (str): None will causes a host collection in which command and file specs are run. A directory or archive path will cause collection from the directory or archive, and only file type specs or those that depend on `insights.core.context.HostArchiveContext` will execute. component (function or class): The component to execute. Will only execute the component and its dependency graph. If None, all components with met dependencies will execute. Returns: broker: object containing the result of the evaluation.
[ "run", "is", "a", "general", "interface", "that", "is", "meant", "for", "stand", "alone", "scripts", "to", "use", "when", "executing", "insights", "components", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/__init__.py#L98-L126
train
220,858
RedHatInsights/insights-core
insights/__init__.py
apply_default_enabled
def apply_default_enabled(default_enabled): """ Configures dr and already loaded components with a default enabled value. """ for k in dr.ENABLED: dr.ENABLED[k] = default_enabled enabled = defaultdict(lambda: default_enabled) enabled.update(dr.ENABLED) dr.ENABLED = enabled
python
def apply_default_enabled(default_enabled): """ Configures dr and already loaded components with a default enabled value. """ for k in dr.ENABLED: dr.ENABLED[k] = default_enabled enabled = defaultdict(lambda: default_enabled) enabled.update(dr.ENABLED) dr.ENABLED = enabled
[ "def", "apply_default_enabled", "(", "default_enabled", ")", ":", "for", "k", "in", "dr", ".", "ENABLED", ":", "dr", ".", "ENABLED", "[", "k", "]", "=", "default_enabled", "enabled", "=", "defaultdict", "(", "lambda", ":", "default_enabled", ")", "enabled", ...
Configures dr and already loaded components with a default enabled value.
[ "Configures", "dr", "and", "already", "loaded", "components", "with", "a", "default", "enabled", "value", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/__init__.py#L158-L168
train
220,859
RedHatInsights/insights-core
insights/__init__.py
apply_configs
def apply_configs(config): """ Configures components. They can be enabled or disabled, have timeouts set if applicable, and have metadata customized. Valid keys are name, enabled, metadata, and timeout. Args: config (list): a list of dictionaries with the following keys: default_component_enabled (bool): default value for whether compoments are enable if not specifically declared in the config section packages (list): a list of packages to be loaded. These will be in addition to any packages previosly loaded for the `-p` option configs: name, enabled, metadata, and timeout. All keys are optional except name. name is the prefix or exact name of any loaded component. Any component starting with name will have the associated configuration applied. enabled is whether the matching components will execute even if their dependencies are met. Defaults to True. timeout sets the class level timeout attribute of any component so long as the attribute already exists. metadata is any dictionary that you want to attach to the component. The dictionary can be retrieved by the component at runtime. """ default_enabled = config.get('default_component_enabled', False) delegate_keys = sorted(dr.DELEGATES, key=dr.get_name) for comp_cfg in config.get('configs', []): name = comp_cfg.get("name") for c in delegate_keys: delegate = dr.DELEGATES[c] cname = dr.get_name(c) if cname.startswith(name): dr.ENABLED[c] = comp_cfg.get("enabled", default_enabled) delegate.metadata.update(comp_cfg.get("metadata", {})) delegate.tags = set(comp_cfg.get("tags", delegate.tags)) for k, v in delegate.metadata.items(): if hasattr(c, k): log.debug("Setting %s.%s to %s", cname, k, v) setattr(c, k, v) if hasattr(c, "timeout"): c.timeout = comp_cfg.get("timeout", c.timeout) if cname == name: break
python
def apply_configs(config): """ Configures components. They can be enabled or disabled, have timeouts set if applicable, and have metadata customized. Valid keys are name, enabled, metadata, and timeout. Args: config (list): a list of dictionaries with the following keys: default_component_enabled (bool): default value for whether compoments are enable if not specifically declared in the config section packages (list): a list of packages to be loaded. These will be in addition to any packages previosly loaded for the `-p` option configs: name, enabled, metadata, and timeout. All keys are optional except name. name is the prefix or exact name of any loaded component. Any component starting with name will have the associated configuration applied. enabled is whether the matching components will execute even if their dependencies are met. Defaults to True. timeout sets the class level timeout attribute of any component so long as the attribute already exists. metadata is any dictionary that you want to attach to the component. The dictionary can be retrieved by the component at runtime. """ default_enabled = config.get('default_component_enabled', False) delegate_keys = sorted(dr.DELEGATES, key=dr.get_name) for comp_cfg in config.get('configs', []): name = comp_cfg.get("name") for c in delegate_keys: delegate = dr.DELEGATES[c] cname = dr.get_name(c) if cname.startswith(name): dr.ENABLED[c] = comp_cfg.get("enabled", default_enabled) delegate.metadata.update(comp_cfg.get("metadata", {})) delegate.tags = set(comp_cfg.get("tags", delegate.tags)) for k, v in delegate.metadata.items(): if hasattr(c, k): log.debug("Setting %s.%s to %s", cname, k, v) setattr(c, k, v) if hasattr(c, "timeout"): c.timeout = comp_cfg.get("timeout", c.timeout) if cname == name: break
[ "def", "apply_configs", "(", "config", ")", ":", "default_enabled", "=", "config", ".", "get", "(", "'default_component_enabled'", ",", "False", ")", "delegate_keys", "=", "sorted", "(", "dr", ".", "DELEGATES", ",", "key", "=", "dr", ".", "get_name", ")", ...
Configures components. They can be enabled or disabled, have timeouts set if applicable, and have metadata customized. Valid keys are name, enabled, metadata, and timeout. Args: config (list): a list of dictionaries with the following keys: default_component_enabled (bool): default value for whether compoments are enable if not specifically declared in the config section packages (list): a list of packages to be loaded. These will be in addition to any packages previosly loaded for the `-p` option configs: name, enabled, metadata, and timeout. All keys are optional except name. name is the prefix or exact name of any loaded component. Any component starting with name will have the associated configuration applied. enabled is whether the matching components will execute even if their dependencies are met. Defaults to True. timeout sets the class level timeout attribute of any component so long as the attribute already exists. metadata is any dictionary that you want to attach to the component. The dictionary can be retrieved by the component at runtime.
[ "Configures", "components", ".", "They", "can", "be", "enabled", "or", "disabled", "have", "timeouts", "set", "if", "applicable", "and", "have", "metadata", "customized", ".", "Valid", "keys", "are", "name", "enabled", "metadata", "and", "timeout", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/__init__.py#L171-L221
train
220,860
RedHatInsights/insights-core
insights/contrib/ConfigParser.py
RawConfigParser._read
def _read(self, fp, fpname): """Parse a sectioned setup file. The sections in setup file contains a title line at the top, indicated by a name in square brackets (`[]'), plus key/value options lines, indicated by `name: value' format lines. Continuations are represented by an embedded newline then leading whitespace. Blank lines, lines beginning with a '#', and just about everything else are ignored. """ cursect = None # None, or a dictionary optname = None lineno = 0 e = None # None, or an exception while True: line = fp.readline() if not line: break lineno = lineno + 1 # comment or blank line? if line.strip() == '' or line[0] in '#;': continue if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR": # no leading whitespace continue # continuation line? if line[0].isspace() and cursect is not None and optname: value = line.strip() if value: cursect[optname].append(value) # a section header or option header? else: # is it a section header? mo = self.SECTCRE.match(line) if mo: sectname = mo.group('header') if sectname in self._sections: cursect = self._sections[sectname] elif sectname == DEFAULTSECT: cursect = self._defaults else: cursect = self._dict() cursect['__name__'] = sectname self._sections[sectname] = cursect # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, line) # an option line? 
else: mo = self._optcre.match(line) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') optname = self.optionxform(optname.rstrip()) # This check is fine because the OPTCRE cannot # match if it would set optval to None if optval is not None: if vi in ('=', ':') and ';' in optval: # ';' is a comment delimiter only if it follows # a spacing character pos = optval.find(';') if pos != -1 and optval[pos-1].isspace(): optval = optval[:pos] optval = optval.strip() # allow empty values if optval == '""': optval = '' cursect[optname] = [optval] else: # valueless option handling cursect[optname] = optval else: # a non-fatal parsing error occurred. set up the # exception but keep going. the exception will be # raised at the end of the file and will contain a # list of all bogus lines if not e: e = ParsingError(fpname) e.append(lineno, repr(line)) # if any parsing errors occurred, raise an exception if e: raise e # join the multi-line values collected while reading all_sections = [self._defaults] all_sections.extend(self._sections.values()) for options in all_sections: for name, val in options.items(): if isinstance(val, list): options[name] = '\n'.join(val)
python
def _read(self, fp, fpname): """Parse a sectioned setup file. The sections in setup file contains a title line at the top, indicated by a name in square brackets (`[]'), plus key/value options lines, indicated by `name: value' format lines. Continuations are represented by an embedded newline then leading whitespace. Blank lines, lines beginning with a '#', and just about everything else are ignored. """ cursect = None # None, or a dictionary optname = None lineno = 0 e = None # None, or an exception while True: line = fp.readline() if not line: break lineno = lineno + 1 # comment or blank line? if line.strip() == '' or line[0] in '#;': continue if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR": # no leading whitespace continue # continuation line? if line[0].isspace() and cursect is not None and optname: value = line.strip() if value: cursect[optname].append(value) # a section header or option header? else: # is it a section header? mo = self.SECTCRE.match(line) if mo: sectname = mo.group('header') if sectname in self._sections: cursect = self._sections[sectname] elif sectname == DEFAULTSECT: cursect = self._defaults else: cursect = self._dict() cursect['__name__'] = sectname self._sections[sectname] = cursect # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, line) # an option line? 
else: mo = self._optcre.match(line) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') optname = self.optionxform(optname.rstrip()) # This check is fine because the OPTCRE cannot # match if it would set optval to None if optval is not None: if vi in ('=', ':') and ';' in optval: # ';' is a comment delimiter only if it follows # a spacing character pos = optval.find(';') if pos != -1 and optval[pos-1].isspace(): optval = optval[:pos] optval = optval.strip() # allow empty values if optval == '""': optval = '' cursect[optname] = [optval] else: # valueless option handling cursect[optname] = optval else: # a non-fatal parsing error occurred. set up the # exception but keep going. the exception will be # raised at the end of the file and will contain a # list of all bogus lines if not e: e = ParsingError(fpname) e.append(lineno, repr(line)) # if any parsing errors occurred, raise an exception if e: raise e # join the multi-line values collected while reading all_sections = [self._defaults] all_sections.extend(self._sections.values()) for options in all_sections: for name, val in options.items(): if isinstance(val, list): options[name] = '\n'.join(val)
[ "def", "_read", "(", "self", ",", "fp", ",", "fpname", ")", ":", "cursect", "=", "None", "# None, or a dictionary", "optname", "=", "None", "lineno", "=", "0", "e", "=", "None", "# None, or an exception", "while", "True", ":", "line", "=", "fp", ".", "re...
Parse a sectioned setup file. The sections in setup file contains a title line at the top, indicated by a name in square brackets (`[]'), plus key/value options lines, indicated by `name: value' format lines. Continuations are represented by an embedded newline then leading whitespace. Blank lines, lines beginning with a '#', and just about everything else are ignored.
[ "Parse", "a", "sectioned", "setup", "file", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/ConfigParser.py#L464-L554
train
220,861
RedHatInsights/insights-core
insights/client/utilities.py
determine_hostname
def determine_hostname(display_name=None): """ Find fqdn if we can """ if display_name: # if display_name is provided, just return the given name return display_name else: socket_gethostname = socket.gethostname() socket_fqdn = socket.getfqdn() try: socket_ex = socket.gethostbyname_ex(socket_gethostname)[0] except (LookupError, socket.gaierror): socket_ex = '' gethostname_len = len(socket_gethostname) fqdn_len = len(socket_fqdn) ex_len = len(socket_ex) if fqdn_len > gethostname_len or ex_len > gethostname_len: if "localhost" not in socket_ex and len(socket_ex): return socket_ex if "localhost" not in socket_fqdn: return socket_fqdn return socket_gethostname
python
def determine_hostname(display_name=None): """ Find fqdn if we can """ if display_name: # if display_name is provided, just return the given name return display_name else: socket_gethostname = socket.gethostname() socket_fqdn = socket.getfqdn() try: socket_ex = socket.gethostbyname_ex(socket_gethostname)[0] except (LookupError, socket.gaierror): socket_ex = '' gethostname_len = len(socket_gethostname) fqdn_len = len(socket_fqdn) ex_len = len(socket_ex) if fqdn_len > gethostname_len or ex_len > gethostname_len: if "localhost" not in socket_ex and len(socket_ex): return socket_ex if "localhost" not in socket_fqdn: return socket_fqdn return socket_gethostname
[ "def", "determine_hostname", "(", "display_name", "=", "None", ")", ":", "if", "display_name", ":", "# if display_name is provided, just return the given name", "return", "display_name", "else", ":", "socket_gethostname", "=", "socket", ".", "gethostname", "(", ")", "so...
Find fqdn if we can
[ "Find", "fqdn", "if", "we", "can" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L21-L47
train
220,862
RedHatInsights/insights-core
insights/client/utilities.py
write_unregistered_file
def write_unregistered_file(date=None): """ Write .unregistered out to disk """ delete_registered_file() if date is None: date = get_time() for f in constants.unregistered_files: if os.path.lexists(f): if os.path.islink(f): # kill symlinks and regenerate os.remove(f) write_to_disk(f, content=str(date)) else: write_to_disk(f, content=str(date))
python
def write_unregistered_file(date=None): """ Write .unregistered out to disk """ delete_registered_file() if date is None: date = get_time() for f in constants.unregistered_files: if os.path.lexists(f): if os.path.islink(f): # kill symlinks and regenerate os.remove(f) write_to_disk(f, content=str(date)) else: write_to_disk(f, content=str(date))
[ "def", "write_unregistered_file", "(", "date", "=", "None", ")", ":", "delete_registered_file", "(", ")", "if", "date", "is", "None", ":", "date", "=", "get_time", "(", ")", "for", "f", "in", "constants", ".", "unregistered_files", ":", "if", "os", ".", ...
Write .unregistered out to disk
[ "Write", ".", "unregistered", "out", "to", "disk" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L66-L80
train
220,863
RedHatInsights/insights-core
insights/client/utilities.py
write_to_disk
def write_to_disk(filename, delete=False, content=get_time()): """ Write filename out to disk """ if not os.path.exists(os.path.dirname(filename)): return if delete: if os.path.lexists(filename): os.remove(filename) else: with open(filename, 'wb') as f: f.write(content.encode('utf-8'))
python
def write_to_disk(filename, delete=False, content=get_time()): """ Write filename out to disk """ if not os.path.exists(os.path.dirname(filename)): return if delete: if os.path.lexists(filename): os.remove(filename) else: with open(filename, 'wb') as f: f.write(content.encode('utf-8'))
[ "def", "write_to_disk", "(", "filename", ",", "delete", "=", "False", ",", "content", "=", "get_time", "(", ")", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "retur...
Write filename out to disk
[ "Write", "filename", "out", "to", "disk" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L93-L104
train
220,864
RedHatInsights/insights-core
insights/client/utilities.py
_expand_paths
def _expand_paths(path): """ Expand wildcarded paths """ dir_name = os.path.dirname(path) paths = [] logger.debug("Attempting to expand %s", path) if os.path.isdir(dir_name): files = os.listdir(dir_name) match = os.path.basename(path) for file_path in files: if re.match(match, file_path): expanded_path = os.path.join(dir_name, file_path) paths.append(expanded_path) logger.debug("Expanded paths %s", paths) return paths else: logger.debug("Could not expand %s", path)
python
def _expand_paths(path): """ Expand wildcarded paths """ dir_name = os.path.dirname(path) paths = [] logger.debug("Attempting to expand %s", path) if os.path.isdir(dir_name): files = os.listdir(dir_name) match = os.path.basename(path) for file_path in files: if re.match(match, file_path): expanded_path = os.path.join(dir_name, file_path) paths.append(expanded_path) logger.debug("Expanded paths %s", paths) return paths else: logger.debug("Could not expand %s", path)
[ "def", "_expand_paths", "(", "path", ")", ":", "dir_name", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "paths", "=", "[", "]", "logger", ".", "debug", "(", "\"Attempting to expand %s\"", ",", "path", ")", "if", "os", ".", "path", ".", "...
Expand wildcarded paths
[ "Expand", "wildcarded", "paths" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L129-L146
train
220,865
RedHatInsights/insights-core
insights/client/utilities.py
validate_remove_file
def validate_remove_file(remove_file): """ Validate the remove file """ if not os.path.isfile(remove_file): logger.warn("WARN: Remove file does not exist") return False # Make sure permissions are 600 mode = stat.S_IMODE(os.stat(remove_file).st_mode) if not mode == 0o600: logger.error("ERROR: Invalid remove file permissions" "Expected 0600 got %s" % oct(mode)) return False else: logger.debug("Correct file permissions") if os.path.isfile(remove_file): parsedconfig = RawConfigParser() parsedconfig.read(remove_file) rm_conf = {} for item, value in parsedconfig.items('remove'): rm_conf[item] = value.strip().split(',') # Using print here as this could contain sensitive information logger.debug("Remove file parsed contents") logger.debug(rm_conf) logger.info("JSON parsed correctly") return True
python
def validate_remove_file(remove_file): """ Validate the remove file """ if not os.path.isfile(remove_file): logger.warn("WARN: Remove file does not exist") return False # Make sure permissions are 600 mode = stat.S_IMODE(os.stat(remove_file).st_mode) if not mode == 0o600: logger.error("ERROR: Invalid remove file permissions" "Expected 0600 got %s" % oct(mode)) return False else: logger.debug("Correct file permissions") if os.path.isfile(remove_file): parsedconfig = RawConfigParser() parsedconfig.read(remove_file) rm_conf = {} for item, value in parsedconfig.items('remove'): rm_conf[item] = value.strip().split(',') # Using print here as this could contain sensitive information logger.debug("Remove file parsed contents") logger.debug(rm_conf) logger.info("JSON parsed correctly") return True
[ "def", "validate_remove_file", "(", "remove_file", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "remove_file", ")", ":", "logger", ".", "warn", "(", "\"WARN: Remove file does not exist\"", ")", "return", "False", "# Make sure permissions are 600", ...
Validate the remove file
[ "Validate", "the", "remove", "file" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L149-L175
train
220,866
RedHatInsights/insights-core
insights/client/utilities.py
write_data_to_file
def write_data_to_file(data, filepath): ''' Write data to file ''' try: os.makedirs(os.path.dirname(filepath), 0o700) except OSError: pass write_to_disk(filepath, content=data)
python
def write_data_to_file(data, filepath): ''' Write data to file ''' try: os.makedirs(os.path.dirname(filepath), 0o700) except OSError: pass write_to_disk(filepath, content=data)
[ "def", "write_data_to_file", "(", "data", ",", "filepath", ")", ":", "try", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filepath", ")", ",", "0o700", ")", "except", "OSError", ":", "pass", "write_to_disk", "(", "filepath", ...
Write data to file
[ "Write", "data", "to", "file" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L178-L187
train
220,867
RedHatInsights/insights-core
insights/client/utilities.py
magic_plan_b
def magic_plan_b(filename): ''' Use this in instances where python-magic is MIA and can't be installed for whatever reason ''' cmd = shlex.split('file --mime-type --mime-encoding ' + filename) stdout, stderr = Popen(cmd, stdout=PIPE).communicate() stdout = stdout.decode("utf-8") mime_str = stdout.split(filename + ': ')[1].strip() return mime_str
python
def magic_plan_b(filename): ''' Use this in instances where python-magic is MIA and can't be installed for whatever reason ''' cmd = shlex.split('file --mime-type --mime-encoding ' + filename) stdout, stderr = Popen(cmd, stdout=PIPE).communicate() stdout = stdout.decode("utf-8") mime_str = stdout.split(filename + ': ')[1].strip() return mime_str
[ "def", "magic_plan_b", "(", "filename", ")", ":", "cmd", "=", "shlex", ".", "split", "(", "'file --mime-type --mime-encoding '", "+", "filename", ")", "stdout", ",", "stderr", "=", "Popen", "(", "cmd", ",", "stdout", "=", "PIPE", ")", ".", "communicate", "...
Use this in instances where python-magic is MIA and can't be installed for whatever reason
[ "Use", "this", "in", "instances", "where", "python", "-", "magic", "is", "MIA", "and", "can", "t", "be", "installed", "for", "whatever", "reason" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L190-L200
train
220,868
RedHatInsights/insights-core
insights/client/utilities.py
modify_config_file
def modify_config_file(updates): ''' Update the config file with certain things ''' cmd = '/bin/sed ' for key in updates: cmd = cmd + '-e \'s/^#*{key}.*=.*$/{key}={value}/\' '.format(key=key, value=updates[key]) cmd = cmd + constants.default_conf_file status = run_command_get_output(cmd) write_to_disk(constants.default_conf_file, content=status['output'])
python
def modify_config_file(updates): ''' Update the config file with certain things ''' cmd = '/bin/sed ' for key in updates: cmd = cmd + '-e \'s/^#*{key}.*=.*$/{key}={value}/\' '.format(key=key, value=updates[key]) cmd = cmd + constants.default_conf_file status = run_command_get_output(cmd) write_to_disk(constants.default_conf_file, content=status['output'])
[ "def", "modify_config_file", "(", "updates", ")", ":", "cmd", "=", "'/bin/sed '", "for", "key", "in", "updates", ":", "cmd", "=", "cmd", "+", "'-e \\'s/^#*{key}.*=.*$/{key}={value}/\\' '", ".", "format", "(", "key", "=", "key", ",", "value", "=", "updates", ...
Update the config file with certain things
[ "Update", "the", "config", "file", "with", "certain", "things" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/utilities.py#L214-L223
train
220,869
RedHatInsights/insights-core
insights/parsers/ps.py
Ps.users
def users(self, proc): """ Searches for all users running a given command. Returns: dict: each username as a key to a list of PIDs (as strings) that are running the given process. ``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found. .. note:: 'proc' must match the entire command and arguments. """ ret = {} if self.first_column in ['USER', 'UID']: for row in self.data: if proc == row[self.command_name]: if row[self.first_column] not in ret: ret[row[self.first_column]] = [] ret[row[self.first_column]].append(row["PID"]) return ret
python
def users(self, proc): """ Searches for all users running a given command. Returns: dict: each username as a key to a list of PIDs (as strings) that are running the given process. ``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found. .. note:: 'proc' must match the entire command and arguments. """ ret = {} if self.first_column in ['USER', 'UID']: for row in self.data: if proc == row[self.command_name]: if row[self.first_column] not in ret: ret[row[self.first_column]] = [] ret[row[self.first_column]].append(row["PID"]) return ret
[ "def", "users", "(", "self", ",", "proc", ")", ":", "ret", "=", "{", "}", "if", "self", ".", "first_column", "in", "[", "'USER'", ",", "'UID'", "]", ":", "for", "row", "in", "self", ".", "data", ":", "if", "proc", "==", "row", "[", "self", ".",...
Searches for all users running a given command. Returns: dict: each username as a key to a list of PIDs (as strings) that are running the given process. ``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found. .. note:: 'proc' must match the entire command and arguments.
[ "Searches", "for", "all", "users", "running", "a", "given", "command", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/ps.py#L104-L123
train
220,870
RedHatInsights/insights-core
insights/parsers/ps.py
Ps.fuzzy_match
def fuzzy_match(self, proc): """ Are there any commands that contain the given text? Returns: boolean: ``True`` if the word ``proc`` appears in the command column. .. note:: 'proc' can match anywhere in the command path, name or arguments. """ return any(proc in row[self.command_name] for row in self.data)
python
def fuzzy_match(self, proc): """ Are there any commands that contain the given text? Returns: boolean: ``True`` if the word ``proc`` appears in the command column. .. note:: 'proc' can match anywhere in the command path, name or arguments. """ return any(proc in row[self.command_name] for row in self.data)
[ "def", "fuzzy_match", "(", "self", ",", "proc", ")", ":", "return", "any", "(", "proc", "in", "row", "[", "self", ".", "command_name", "]", "for", "row", "in", "self", ".", "data", ")" ]
Are there any commands that contain the given text? Returns: boolean: ``True`` if the word ``proc`` appears in the command column. .. note:: 'proc' can match anywhere in the command path, name or arguments.
[ "Are", "there", "any", "commands", "that", "contain", "the", "given", "text?" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/ps.py#L125-L135
train
220,871
RedHatInsights/insights-core
insights/parsers/ps.py
Ps.number_occurences
def number_occurences(self, proc): """ Returns the number of occurencies of commands that contain given text Returns: int: The number of occurencies of commands with given text .. note:: 'proc' can match anywhere in the command path, name or arguments. """ return len([True for row in self.data if proc in row[self.command_name]])
python
def number_occurences(self, proc): """ Returns the number of occurencies of commands that contain given text Returns: int: The number of occurencies of commands with given text .. note:: 'proc' can match anywhere in the command path, name or arguments. """ return len([True for row in self.data if proc in row[self.command_name]])
[ "def", "number_occurences", "(", "self", ",", "proc", ")", ":", "return", "len", "(", "[", "True", "for", "row", "in", "self", ".", "data", "if", "proc", "in", "row", "[", "self", ".", "command_name", "]", "]", ")" ]
Returns the number of occurencies of commands that contain given text Returns: int: The number of occurencies of commands with given text .. note:: 'proc' can match anywhere in the command path, name or arguments.
[ "Returns", "the", "number", "of", "occurencies", "of", "commands", "that", "contain", "given", "text" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/ps.py#L137-L147
train
220,872
RedHatInsights/insights-core
insights/parsers/journald_conf.py
JournaldConf.parse_content
def parse_content(self, content): """ Main parsing class method which stores all interesting data from the content. Args: content (context.content): Parser context content """ # note, the Parser class sets: # * self.file_path = context.path and # * self.file_name = os.path.basename(context.path) self.active_lines_unparsed = get_active_lines(content) if content is not None else [] # (man page shows all options with "=") self.active_settings = split_kv_pairs(content, use_partition=False) if content is not None else []
python
def parse_content(self, content): """ Main parsing class method which stores all interesting data from the content. Args: content (context.content): Parser context content """ # note, the Parser class sets: # * self.file_path = context.path and # * self.file_name = os.path.basename(context.path) self.active_lines_unparsed = get_active_lines(content) if content is not None else [] # (man page shows all options with "=") self.active_settings = split_kv_pairs(content, use_partition=False) if content is not None else []
[ "def", "parse_content", "(", "self", ",", "content", ")", ":", "# note, the Parser class sets:", "# * self.file_path = context.path and", "# * self.file_name = os.path.basename(context.path)", "self", ".", "active_lines_unparsed", "=", "get_active_lines", "(", "content", ")", "...
Main parsing class method which stores all interesting data from the content. Args: content (context.content): Parser context content
[ "Main", "parsing", "class", "method", "which", "stores", "all", "interesting", "data", "from", "the", "content", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/journald_conf.py#L54-L66
train
220,873
RedHatInsights/insights-core
examples/cluster_rules/allnodes_cpu.py
cluster_info
def cluster_info(cpu, cfg): """ Collects fact for each host Collects the cpu and node configuration facts to be used by the rule. Arguments: cpu (CpuInfo): Parser object for the cpu info. cfg (NodeConfig): Parser object for the node configuration. Returns: dict: Dictionary of fact information including the keys ``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``, ``max_pods``, and ``max_pods_customized``. """ cpus = cpu.cpu_count pods_per_core = cfg.doc.find("pods-per-core") pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE cfg_max_pods = cfg.doc.find("max-pods") cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS calc_max_pods = cpus * pods_per_core_int return { "cpu_count": cpus, "pods_per_core": pods_per_core_int, "pods_per_core_customized": bool(pods_per_core), "max_pods": min(cfg_max_pods_int, calc_max_pods), "max_pods_customized": bool(cfg_max_pods) }
python
def cluster_info(cpu, cfg): """ Collects fact for each host Collects the cpu and node configuration facts to be used by the rule. Arguments: cpu (CpuInfo): Parser object for the cpu info. cfg (NodeConfig): Parser object for the node configuration. Returns: dict: Dictionary of fact information including the keys ``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``, ``max_pods``, and ``max_pods_customized``. """ cpus = cpu.cpu_count pods_per_core = cfg.doc.find("pods-per-core") pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE cfg_max_pods = cfg.doc.find("max-pods") cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS calc_max_pods = cpus * pods_per_core_int return { "cpu_count": cpus, "pods_per_core": pods_per_core_int, "pods_per_core_customized": bool(pods_per_core), "max_pods": min(cfg_max_pods_int, calc_max_pods), "max_pods_customized": bool(cfg_max_pods) }
[ "def", "cluster_info", "(", "cpu", ",", "cfg", ")", ":", "cpus", "=", "cpu", ".", "cpu_count", "pods_per_core", "=", "cfg", ".", "doc", ".", "find", "(", "\"pods-per-core\"", ")", "pods_per_core_int", "=", "int", "(", "pods_per_core", ".", "value", ")", ...
Collects fact for each host Collects the cpu and node configuration facts to be used by the rule. Arguments: cpu (CpuInfo): Parser object for the cpu info. cfg (NodeConfig): Parser object for the node configuration. Returns: dict: Dictionary of fact information including the keys ``cpu_count``, ``pods_per_core_int``, ``pods_per_core_customized``, ``max_pods``, and ``max_pods_customized``.
[ "Collects", "fact", "for", "each", "host" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/cluster_rules/allnodes_cpu.py#L53-L81
train
220,874
RedHatInsights/insights-core
examples/cluster_rules/allnodes_cpu.py
master_etcd
def master_etcd(info, meta, max_pod_cluster, label): """ Function used to create the response for all master node types """ nodes = meta.get(label, []) or [] info = info[info["machine_id"].isin(nodes)] if info.empty: return cpu_factor = max_pod_cluster / 1000.0 nocpu_expected = MASTER_MIN_CORE + (max_pod_cluster / 1000.0) bad = info[info["cpu_count"] < nocpu_expected] good = info[info["cpu_count"] >= nocpu_expected] return make_response("MASTER_ETCD", nocpu_expected=nocpu_expected, cpu_factor=cpu_factor, bad=bad, good=good, max_pod_cluster=max_pod_cluster, GREEN=Fore.GREEN, RED=Fore.RED, YELLOW=Fore.YELLOW, NC=Style.RESET_ALL)
python
def master_etcd(info, meta, max_pod_cluster, label): """ Function used to create the response for all master node types """ nodes = meta.get(label, []) or [] info = info[info["machine_id"].isin(nodes)] if info.empty: return cpu_factor = max_pod_cluster / 1000.0 nocpu_expected = MASTER_MIN_CORE + (max_pod_cluster / 1000.0) bad = info[info["cpu_count"] < nocpu_expected] good = info[info["cpu_count"] >= nocpu_expected] return make_response("MASTER_ETCD", nocpu_expected=nocpu_expected, cpu_factor=cpu_factor, bad=bad, good=good, max_pod_cluster=max_pod_cluster, GREEN=Fore.GREEN, RED=Fore.RED, YELLOW=Fore.YELLOW, NC=Style.RESET_ALL)
[ "def", "master_etcd", "(", "info", ",", "meta", ",", "max_pod_cluster", ",", "label", ")", ":", "nodes", "=", "meta", ".", "get", "(", "label", ",", "[", "]", ")", "or", "[", "]", "info", "=", "info", "[", "info", "[", "\"machine_id\"", "]", ".", ...
Function used to create the response for all master node types
[ "Function", "used", "to", "create", "the", "response", "for", "all", "master", "node", "types" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/cluster_rules/allnodes_cpu.py#L84-L98
train
220,875
RedHatInsights/insights-core
examples/cluster_rules/allnodes_cpu.py
infra_nodes
def infra_nodes(info, meta, max_pod_cluster, label, key): """ Function used to create the response for all infra node types """ nodes = meta.get(label, []) or [] infos = info[info["machine_id"].isin(nodes)] if infos.empty: return return make_response(key, max_pod_cluster=max_pod_cluster, infos=infos, GREEN=Fore.GREEN, RED=Fore.RED, YELLOW=Fore.YELLOW, NC=Style.RESET_ALL)
python
def infra_nodes(info, meta, max_pod_cluster, label, key): """ Function used to create the response for all infra node types """ nodes = meta.get(label, []) or [] infos = info[info["machine_id"].isin(nodes)] if infos.empty: return return make_response(key, max_pod_cluster=max_pod_cluster, infos=infos, GREEN=Fore.GREEN, RED=Fore.RED, YELLOW=Fore.YELLOW, NC=Style.RESET_ALL)
[ "def", "infra_nodes", "(", "info", ",", "meta", ",", "max_pod_cluster", ",", "label", ",", "key", ")", ":", "nodes", "=", "meta", ".", "get", "(", "label", ",", "[", "]", ")", "or", "[", "]", "infos", "=", "info", "[", "info", "[", "\"machine_id\""...
Function used to create the response for all infra node types
[ "Function", "used", "to", "create", "the", "response", "for", "all", "infra", "node", "types" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/cluster_rules/allnodes_cpu.py#L101-L108
train
220,876
RedHatInsights/insights-core
insights/parsers/df.py
parse_df_lines
def parse_df_lines(df_content): """Parse contents of each line in ``df`` output. Parse each line of ``df`` output ensuring that wrapped lines are reassembled prior to parsing, and that mount names containing spaces are maintained. Parameters: df_content (list): Lines of df output to be parsed. Returns: list: A list of ``Record`` ``namedtuple``'s. One for each line of the ``df`` output with columns as the key values. The fields of ``Record`` provide information about the file system attributes as determined by the arguments to the ``df`` command. So, for example, if ``df`` is given the ``-alP``, the values are in terms of 1024 blocks. If ``-li`` is given, then the values are in terms of inodes:: - filesystem: Name of the filesystem - total: total number of resources on the filesystem - used: number of the resources used on the filesystem - available: number of the resource available on the filesystem - capacity: percentage of the resource used on the filesystem - mounted_on: mount point of the filesystem """ df_ls = {} df_out = [] is_sep = False columns = Record._fields for line in df_content[1:]: # [1:] -> Skip the header # Stop at 5 splits to avoid splitting spaces in path line_splits = line.rstrip().split(None, 5) if len(line_splits) >= 6: for i, name in enumerate(columns): df_ls[name] = line_splits[i] is_sep = False elif len(line_splits) == 1: # First line of the separated line df_ls[columns[0]] = line_splits[0] is_sep = True elif is_sep and len(line_splits) >= 5: # Re-split to avoid this kind of "Mounted on": "VMware Tools" line_splits = line.split(None, 4) # Last line of the separated line for i, name in enumerate(columns[1:]): df_ls[name] = line_splits[i] is_sep = False elif not line_splits: # Skip empty lines (might in sosreport) continue else: raise ParseException("Could not parse line '{l}'".format(l=line)) # Only add this record if we've got a line and it's not separated if df_ls and not is_sep: rec = Record(**df_ls) df_out.append(rec) df_ls = {} return 
df_out
python
def parse_df_lines(df_content): """Parse contents of each line in ``df`` output. Parse each line of ``df`` output ensuring that wrapped lines are reassembled prior to parsing, and that mount names containing spaces are maintained. Parameters: df_content (list): Lines of df output to be parsed. Returns: list: A list of ``Record`` ``namedtuple``'s. One for each line of the ``df`` output with columns as the key values. The fields of ``Record`` provide information about the file system attributes as determined by the arguments to the ``df`` command. So, for example, if ``df`` is given the ``-alP``, the values are in terms of 1024 blocks. If ``-li`` is given, then the values are in terms of inodes:: - filesystem: Name of the filesystem - total: total number of resources on the filesystem - used: number of the resources used on the filesystem - available: number of the resource available on the filesystem - capacity: percentage of the resource used on the filesystem - mounted_on: mount point of the filesystem """ df_ls = {} df_out = [] is_sep = False columns = Record._fields for line in df_content[1:]: # [1:] -> Skip the header # Stop at 5 splits to avoid splitting spaces in path line_splits = line.rstrip().split(None, 5) if len(line_splits) >= 6: for i, name in enumerate(columns): df_ls[name] = line_splits[i] is_sep = False elif len(line_splits) == 1: # First line of the separated line df_ls[columns[0]] = line_splits[0] is_sep = True elif is_sep and len(line_splits) >= 5: # Re-split to avoid this kind of "Mounted on": "VMware Tools" line_splits = line.split(None, 4) # Last line of the separated line for i, name in enumerate(columns[1:]): df_ls[name] = line_splits[i] is_sep = False elif not line_splits: # Skip empty lines (might in sosreport) continue else: raise ParseException("Could not parse line '{l}'".format(l=line)) # Only add this record if we've got a line and it's not separated if df_ls and not is_sep: rec = Record(**df_ls) df_out.append(rec) df_ls = {} return 
df_out
[ "def", "parse_df_lines", "(", "df_content", ")", ":", "df_ls", "=", "{", "}", "df_out", "=", "[", "]", "is_sep", "=", "False", "columns", "=", "Record", ".", "_fields", "for", "line", "in", "df_content", "[", "1", ":", "]", ":", "# [1:] -> Skip the heade...
Parse contents of each line in ``df`` output. Parse each line of ``df`` output ensuring that wrapped lines are reassembled prior to parsing, and that mount names containing spaces are maintained. Parameters: df_content (list): Lines of df output to be parsed. Returns: list: A list of ``Record`` ``namedtuple``'s. One for each line of the ``df`` output with columns as the key values. The fields of ``Record`` provide information about the file system attributes as determined by the arguments to the ``df`` command. So, for example, if ``df`` is given the ``-alP``, the values are in terms of 1024 blocks. If ``-li`` is given, then the values are in terms of inodes:: - filesystem: Name of the filesystem - total: total number of resources on the filesystem - used: number of the resources used on the filesystem - available: number of the resource available on the filesystem - capacity: percentage of the resource used on the filesystem - mounted_on: mount point of the filesystem
[ "Parse", "contents", "of", "each", "line", "in", "df", "output", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/df.py#L68-L125
train
220,877
RedHatInsights/insights-core
insights/parsers/system_time.py
NTPConfParser.get_param
def get_param(self, keyword, param=None, default=None): """ Get all the parameters for a given keyword, or default if keyword or parameter are not present in the configuration. This finds every declaration of the given parameter (which is the one which takes effect). If no parameter is given, then the entire line is treated as the parameter. There is always at least one element returned - the default, or Parameters: keyword(str): The keyword name, e.g. 'tinker' or 'driftfile' param(str): The parameter name, e.g. 'panic' or 'step'. If not given, all the definitions of that keyword are given. default(str): The default (singular) value if the keyword or parameter is not found. If not given, None is used. Returns: list: All the values of the given parameter, or an empty list if not found. """ if not keyword or keyword not in self.data: return [default] # keyword in data - if no value, we store None, so return that in a list if self.data[keyword] is None: return [None] # If we're not searching for a particular parameter, just return all # the values for this keyword. if not param: return self.data[keyword] found = [] for line in self.data[keyword]: # Line has already had keyword removed. words = line.strip().split() if len(words) > 1: # Line has param and value - check param: if words[0] == param: found.append(words[1]) else: found.append(words[0]) if found == []: return [default] else: return found
python
def get_param(self, keyword, param=None, default=None): """ Get all the parameters for a given keyword, or default if keyword or parameter are not present in the configuration. This finds every declaration of the given parameter (which is the one which takes effect). If no parameter is given, then the entire line is treated as the parameter. There is always at least one element returned - the default, or Parameters: keyword(str): The keyword name, e.g. 'tinker' or 'driftfile' param(str): The parameter name, e.g. 'panic' or 'step'. If not given, all the definitions of that keyword are given. default(str): The default (singular) value if the keyword or parameter is not found. If not given, None is used. Returns: list: All the values of the given parameter, or an empty list if not found. """ if not keyword or keyword not in self.data: return [default] # keyword in data - if no value, we store None, so return that in a list if self.data[keyword] is None: return [None] # If we're not searching for a particular parameter, just return all # the values for this keyword. if not param: return self.data[keyword] found = [] for line in self.data[keyword]: # Line has already had keyword removed. words = line.strip().split() if len(words) > 1: # Line has param and value - check param: if words[0] == param: found.append(words[1]) else: found.append(words[0]) if found == []: return [default] else: return found
[ "def", "get_param", "(", "self", ",", "keyword", ",", "param", "=", "None", ",", "default", "=", "None", ")", ":", "if", "not", "keyword", "or", "keyword", "not", "in", "self", ".", "data", ":", "return", "[", "default", "]", "# keyword in data - if no v...
Get all the parameters for a given keyword, or default if keyword or parameter are not present in the configuration. This finds every declaration of the given parameter (which is the one which takes effect). If no parameter is given, then the entire line is treated as the parameter. There is always at least one element returned - the default, or Parameters: keyword(str): The keyword name, e.g. 'tinker' or 'driftfile' param(str): The parameter name, e.g. 'panic' or 'step'. If not given, all the definitions of that keyword are given. default(str): The default (singular) value if the keyword or parameter is not found. If not given, None is used. Returns: list: All the values of the given parameter, or an empty list if not found.
[ "Get", "all", "the", "parameters", "for", "a", "given", "keyword", "or", "default", "if", "keyword", "or", "parameter", "are", "not", "present", "in", "the", "configuration", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/system_time.py#L89-L133
train
220,878
RedHatInsights/insights-core
insights/parsers/system_time.py
NTPConfParser.get_last
def get_last(self, keyword, param=None, default=None): """ Get the parameters for a given keyword, or default if keyword or parameter are not present in the configuration. This finds the last declaration of the given parameter (which is the one which takes effect). If no parameter is given, then the entire line is treated as the parameter and returned. Parameters: keyword(str): The keyword name, e.g. 'tinker' or 'driftfile' param(str): The parameter name, e.g. 'panic' or 'step'. If not given, the last definition of that keyword is given. Returns: str or None: The value of the given parameter, or None if not found. """ return self.get_param(keyword, param, default)[-1]
python
def get_last(self, keyword, param=None, default=None): """ Get the parameters for a given keyword, or default if keyword or parameter are not present in the configuration. This finds the last declaration of the given parameter (which is the one which takes effect). If no parameter is given, then the entire line is treated as the parameter and returned. Parameters: keyword(str): The keyword name, e.g. 'tinker' or 'driftfile' param(str): The parameter name, e.g. 'panic' or 'step'. If not given, the last definition of that keyword is given. Returns: str or None: The value of the given parameter, or None if not found. """ return self.get_param(keyword, param, default)[-1]
[ "def", "get_last", "(", "self", ",", "keyword", ",", "param", "=", "None", ",", "default", "=", "None", ")", ":", "return", "self", ".", "get_param", "(", "keyword", ",", "param", ",", "default", ")", "[", "-", "1", "]" ]
Get the parameters for a given keyword, or default if keyword or parameter are not present in the configuration. This finds the last declaration of the given parameter (which is the one which takes effect). If no parameter is given, then the entire line is treated as the parameter and returned. Parameters: keyword(str): The keyword name, e.g. 'tinker' or 'driftfile' param(str): The parameter name, e.g. 'panic' or 'step'. If not given, the last definition of that keyword is given. Returns: str or None: The value of the given parameter, or None if not found.
[ "Get", "the", "parameters", "for", "a", "given", "keyword", "or", "default", "if", "keyword", "or", "parameter", "are", "not", "present", "in", "the", "configuration", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/system_time.py#L135-L153
train
220,879
RedHatInsights/insights-core
insights/parsers/rhn_schema_stats.py
_replace_tabs
def _replace_tabs(s, ts=8): """ Replace the tabs in 's' and keep its original alignment with the tab-stop equals to 'ts' """ result = '' for c in s: if c == '\t': while True: result += ' ' if len(result) % ts == 0: break else: result += c return result
python
def _replace_tabs(s, ts=8): """ Replace the tabs in 's' and keep its original alignment with the tab-stop equals to 'ts' """ result = '' for c in s: if c == '\t': while True: result += ' ' if len(result) % ts == 0: break else: result += c return result
[ "def", "_replace_tabs", "(", "s", ",", "ts", "=", "8", ")", ":", "result", "=", "''", "for", "c", "in", "s", ":", "if", "c", "==", "'\\t'", ":", "while", "True", ":", "result", "+=", "' '", "if", "len", "(", "result", ")", "%", "ts", "==", "0...
Replace the tabs in 's' and keep its original alignment with the tab-stop equals to 'ts'
[ "Replace", "the", "tabs", "in", "s", "and", "keep", "its", "original", "alignment", "with", "the", "tab", "-", "stop", "equals", "to", "ts" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/rhn_schema_stats.py#L6-L20
train
220,880
RedHatInsights/insights-core
insights/core/archives.py
extract
def extract(path, timeout=None, extract_dir=None, content_type=None): """ Extract path into a temporary directory in `extract_dir`. Yields an object containing the temporary path and the content type of the original archive. If the extraction takes longer than `timeout` seconds, the temporary path is removed, and an exception is raised. """ content_type = content_type or content_type_from_file(path) if content_type == "application/zip": extractor = ZipExtractor(timeout=timeout) else: extractor = TarExtractor(timeout=timeout) try: ctx = extractor.from_path(path, extract_dir=extract_dir, content_type=content_type) content_type = extractor.content_type yield Extraction(ctx.tmp_dir, content_type) finally: if extractor.created_tmp_dir: fs.remove(extractor.tmp_dir, chmod=True)
python
def extract(path, timeout=None, extract_dir=None, content_type=None): """ Extract path into a temporary directory in `extract_dir`. Yields an object containing the temporary path and the content type of the original archive. If the extraction takes longer than `timeout` seconds, the temporary path is removed, and an exception is raised. """ content_type = content_type or content_type_from_file(path) if content_type == "application/zip": extractor = ZipExtractor(timeout=timeout) else: extractor = TarExtractor(timeout=timeout) try: ctx = extractor.from_path(path, extract_dir=extract_dir, content_type=content_type) content_type = extractor.content_type yield Extraction(ctx.tmp_dir, content_type) finally: if extractor.created_tmp_dir: fs.remove(extractor.tmp_dir, chmod=True)
[ "def", "extract", "(", "path", ",", "timeout", "=", "None", ",", "extract_dir", "=", "None", ",", "content_type", "=", "None", ")", ":", "content_type", "=", "content_type", "or", "content_type_from_file", "(", "path", ")", "if", "content_type", "==", "\"app...
Extract path into a temporary directory in `extract_dir`. Yields an object containing the temporary path and the content type of the original archive. If the extraction takes longer than `timeout` seconds, the temporary path is removed, and an exception is raised.
[ "Extract", "path", "into", "a", "temporary", "directory", "in", "extract_dir", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/archives.py#L96-L118
train
220,881
RedHatInsights/insights-core
examples/rules/sample_script.py
report
def report(rel): """Fires if the machine is running Fedora.""" if "Fedora" in rel.product: return make_pass("IS_FEDORA", product=rel.product) else: return make_fail("IS_NOT_FEDORA", product=rel.product)
python
def report(rel): """Fires if the machine is running Fedora.""" if "Fedora" in rel.product: return make_pass("IS_FEDORA", product=rel.product) else: return make_fail("IS_NOT_FEDORA", product=rel.product)
[ "def", "report", "(", "rel", ")", ":", "if", "\"Fedora\"", "in", "rel", ".", "product", ":", "return", "make_pass", "(", "\"IS_FEDORA\"", ",", "product", "=", "rel", ".", "product", ")", "else", ":", "return", "make_fail", "(", "\"IS_NOT_FEDORA\"", ",", ...
Fires if the machine is running Fedora.
[ "Fires", "if", "the", "machine", "is", "running", "Fedora", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/rules/sample_script.py#L24-L30
train
220,882
RedHatInsights/insights-core
insights/parsers/crontab.py
_make_cron_re
def _make_cron_re(): """ Make the regular expression that matches a crontab 'cron' line. Each field has a set of allowed values, and can then be in a range, and be listed with dashes. A range can be stepped with the '/' modifier, and ranges can be in a list. A field can also be '*', or '*' divided in steps. The best way to do this is to have a template for a single field that encapsulates the syntax of that field, regardless of what that field matches. We then fill in the actual template's value with the pattern that matches that field. Each field is named, so we can pull them out as a dictionary later. """ range_ = r"{val}(?:-{val}(?:/\d+)?)?" template = r"(?P<{name}>" + "(?:\*(?:/\d+)?|{r}(?:,{r})*)".format(r=range_) + ")\s+" return ( r'^\s*' + template.format(name='minute', val=r'(?:\d|[012345]\d)') + template.format(name='hour', val=r'(?:\d|[01]\d|2[0123])') + template.format(name='day_of_month', val=r'(?:0?[1-9]|[12]\d|3[01])') + template.format(name='month', val=r'(?:0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)') + template.format(name='day_of_week', val=r'(?:[0-7]|mon|tue|wed|thur|fri|sat|sun)') + r'(?P<command>\S.*)$' )
python
def _make_cron_re(): """ Make the regular expression that matches a crontab 'cron' line. Each field has a set of allowed values, and can then be in a range, and be listed with dashes. A range can be stepped with the '/' modifier, and ranges can be in a list. A field can also be '*', or '*' divided in steps. The best way to do this is to have a template for a single field that encapsulates the syntax of that field, regardless of what that field matches. We then fill in the actual template's value with the pattern that matches that field. Each field is named, so we can pull them out as a dictionary later. """ range_ = r"{val}(?:-{val}(?:/\d+)?)?" template = r"(?P<{name}>" + "(?:\*(?:/\d+)?|{r}(?:,{r})*)".format(r=range_) + ")\s+" return ( r'^\s*' + template.format(name='minute', val=r'(?:\d|[012345]\d)') + template.format(name='hour', val=r'(?:\d|[01]\d|2[0123])') + template.format(name='day_of_month', val=r'(?:0?[1-9]|[12]\d|3[01])') + template.format(name='month', val=r'(?:0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)') + template.format(name='day_of_week', val=r'(?:[0-7]|mon|tue|wed|thur|fri|sat|sun)') + r'(?P<command>\S.*)$' )
[ "def", "_make_cron_re", "(", ")", ":", "range_", "=", "r\"{val}(?:-{val}(?:/\\d+)?)?\"", "template", "=", "r\"(?P<{name}>\"", "+", "\"(?:\\*(?:/\\d+)?|{r}(?:,{r})*)\"", ".", "format", "(", "r", "=", "range_", ")", "+", "\")\\s+\"", "return", "(", "r'^\\s*'", "+", ...
Make the regular expression that matches a crontab 'cron' line. Each field has a set of allowed values, and can then be in a range, and be listed with dashes. A range can be stepped with the '/' modifier, and ranges can be in a list. A field can also be '*', or '*' divided in steps. The best way to do this is to have a template for a single field that encapsulates the syntax of that field, regardless of what that field matches. We then fill in the actual template's value with the pattern that matches that field. Each field is named, so we can pull them out as a dictionary later.
[ "Make", "the", "regular", "expression", "that", "matches", "a", "crontab", "cron", "line", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/crontab.py#L8-L32
train
220,883
RedHatInsights/insights-core
insights/client/auto_config.py
verify_connectivity
def verify_connectivity(config): """ Verify connectivity to satellite server """ logger.debug("Verifying Connectivity") ic = InsightsConnection(config) try: branch_info = ic.get_branch_info() except requests.ConnectionError as e: logger.debug(e) logger.debug("Failed to connect to satellite") return False except LookupError as e: logger.debug(e) logger.debug("Failed to parse response from satellite") return False try: remote_leaf = branch_info['remote_leaf'] return remote_leaf except LookupError as e: logger.debug(e) logger.debug("Failed to find accurate branch_info") return False
python
def verify_connectivity(config): """ Verify connectivity to satellite server """ logger.debug("Verifying Connectivity") ic = InsightsConnection(config) try: branch_info = ic.get_branch_info() except requests.ConnectionError as e: logger.debug(e) logger.debug("Failed to connect to satellite") return False except LookupError as e: logger.debug(e) logger.debug("Failed to parse response from satellite") return False try: remote_leaf = branch_info['remote_leaf'] return remote_leaf except LookupError as e: logger.debug(e) logger.debug("Failed to find accurate branch_info") return False
[ "def", "verify_connectivity", "(", "config", ")", ":", "logger", ".", "debug", "(", "\"Verifying Connectivity\"", ")", "ic", "=", "InsightsConnection", "(", "config", ")", "try", ":", "branch_info", "=", "ic", ".", "get_branch_info", "(", ")", "except", "reque...
Verify connectivity to satellite server
[ "Verify", "connectivity", "to", "satellite", "server" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/auto_config.py#L27-L50
train
220,884
RedHatInsights/insights-core
insights/client/auto_config.py
set_auto_configuration
def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): """ Set config based on discovered data """ logger.debug("Attempting to auto configure!") logger.debug("Attempting to auto configure hostname: %s", hostname) logger.debug("Attempting to auto configure CA cert: %s", ca_cert) logger.debug("Attempting to auto configure proxy: %s", proxy) saved_base_url = config.base_url if ca_cert is not None: saved_cert_verify = config.cert_verify config.cert_verify = ca_cert if proxy is not None: saved_proxy = config.proxy config.proxy = proxy if is_satellite: # satellite config.base_url = hostname + '/r/insights' if not config.legacy_upload: config.base_url += '/platform' logger.debug('Auto-configured base_url: %s', config.base_url) else: # connected directly to RHSM if config.legacy_upload: config.base_url = hostname + '/r/insights' else: config.base_url = hostname + '/api' logger.debug('Auto-configured base_url: %s', config.base_url) logger.debug('Not connected to Satellite, skipping branch_info') # direct connection to RHSM, skip verify_connectivity return if not verify_connectivity(config): logger.warn("Could not auto configure, falling back to static config") logger.warn("See %s for additional information", constants.default_log_file) config.base_url = saved_base_url if proxy is not None: if saved_proxy is not None and saved_proxy.lower() == 'none': saved_proxy = None config.proxy = saved_proxy if ca_cert is not None: config.cert_verify = saved_cert_verify
python
def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): """ Set config based on discovered data """ logger.debug("Attempting to auto configure!") logger.debug("Attempting to auto configure hostname: %s", hostname) logger.debug("Attempting to auto configure CA cert: %s", ca_cert) logger.debug("Attempting to auto configure proxy: %s", proxy) saved_base_url = config.base_url if ca_cert is not None: saved_cert_verify = config.cert_verify config.cert_verify = ca_cert if proxy is not None: saved_proxy = config.proxy config.proxy = proxy if is_satellite: # satellite config.base_url = hostname + '/r/insights' if not config.legacy_upload: config.base_url += '/platform' logger.debug('Auto-configured base_url: %s', config.base_url) else: # connected directly to RHSM if config.legacy_upload: config.base_url = hostname + '/r/insights' else: config.base_url = hostname + '/api' logger.debug('Auto-configured base_url: %s', config.base_url) logger.debug('Not connected to Satellite, skipping branch_info') # direct connection to RHSM, skip verify_connectivity return if not verify_connectivity(config): logger.warn("Could not auto configure, falling back to static config") logger.warn("See %s for additional information", constants.default_log_file) config.base_url = saved_base_url if proxy is not None: if saved_proxy is not None and saved_proxy.lower() == 'none': saved_proxy = None config.proxy = saved_proxy if ca_cert is not None: config.cert_verify = saved_cert_verify
[ "def", "set_auto_configuration", "(", "config", ",", "hostname", ",", "ca_cert", ",", "proxy", ",", "is_satellite", ")", ":", "logger", ".", "debug", "(", "\"Attempting to auto configure!\"", ")", "logger", ".", "debug", "(", "\"Attempting to auto configure hostname: ...
Set config based on discovered data
[ "Set", "config", "based", "on", "discovered", "data" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/auto_config.py#L53-L95
train
220,885
RedHatInsights/insights-core
insights/client/auto_config.py
_try_satellite6_configuration
def _try_satellite6_configuration(config): """ Try to autoconfigure for Satellite 6 """ try: rhsm_config = _importInitConfig() logger.debug('Trying to autoconfigure...') cert = open(rhsmCertificate.certpath(), 'r').read() key = open(rhsmCertificate.keypath(), 'r').read() rhsm = rhsmCertificate(key, cert) is_satellite = False # This will throw an exception if we are not registered logger.debug('Checking if system is subscription-manager registered') rhsm.getConsumerId() logger.debug('System is subscription-manager registered') rhsm_hostname = rhsm_config.get('server', 'hostname') rhsm_hostport = rhsm_config.get('server', 'port') rhsm_proxy_hostname = rhsm_config.get('server', 'proxy_hostname').strip() rhsm_proxy_port = rhsm_config.get('server', 'proxy_port').strip() rhsm_proxy_user = rhsm_config.get('server', 'proxy_user').strip() rhsm_proxy_pass = rhsm_config.get('server', 'proxy_password').strip() proxy = None if rhsm_proxy_hostname != "": logger.debug("Found rhsm_proxy_hostname %s", rhsm_proxy_hostname) proxy = "http://" if rhsm_proxy_user != "" and rhsm_proxy_pass != "": logger.debug("Found user and password for rhsm_proxy") proxy = proxy + rhsm_proxy_user + ":" + rhsm_proxy_pass + "@" proxy = proxy + rhsm_proxy_hostname + ':' + rhsm_proxy_port logger.debug("RHSM Proxy: %s", proxy) logger.debug("Found %sHost: %s, Port: %s", ('' if _is_rhn_or_rhsm(rhsm_hostname) else 'Satellite 6 Server '), rhsm_hostname, rhsm_hostport) rhsm_ca = rhsm_config.get('rhsm', 'repo_ca_cert') logger.debug("Found CA: %s", rhsm_ca) logger.debug("Setting authmethod to CERT") config.authmethod = 'CERT' # Directly connected to Red Hat, use cert auth directly with the api if _is_rhn_or_rhsm(rhsm_hostname): # URL changes. 
my favorite if config.legacy_upload: logger.debug("Connected to Red Hat Directly, using cert-api") rhsm_hostname = 'cert-api.access.redhat.com' else: logger.debug("Connected to Red Hat Directly, using cloud.redhat.com") rhsm_hostname = 'cloud.redhat.com' rhsm_ca = None else: # Set the host path # 'rhsm_hostname' should really be named ~ 'rhsm_host_base_url' rhsm_hostname = rhsm_hostname + ':' + rhsm_hostport + '/redhat_access' is_satellite = True logger.debug("Trying to set auto_configuration") set_auto_configuration(config, rhsm_hostname, rhsm_ca, proxy, is_satellite) return True except Exception as e: logger.debug(e) logger.debug('System is NOT subscription-manager registered') return False
python
def _try_satellite6_configuration(config): """ Try to autoconfigure for Satellite 6 """ try: rhsm_config = _importInitConfig() logger.debug('Trying to autoconfigure...') cert = open(rhsmCertificate.certpath(), 'r').read() key = open(rhsmCertificate.keypath(), 'r').read() rhsm = rhsmCertificate(key, cert) is_satellite = False # This will throw an exception if we are not registered logger.debug('Checking if system is subscription-manager registered') rhsm.getConsumerId() logger.debug('System is subscription-manager registered') rhsm_hostname = rhsm_config.get('server', 'hostname') rhsm_hostport = rhsm_config.get('server', 'port') rhsm_proxy_hostname = rhsm_config.get('server', 'proxy_hostname').strip() rhsm_proxy_port = rhsm_config.get('server', 'proxy_port').strip() rhsm_proxy_user = rhsm_config.get('server', 'proxy_user').strip() rhsm_proxy_pass = rhsm_config.get('server', 'proxy_password').strip() proxy = None if rhsm_proxy_hostname != "": logger.debug("Found rhsm_proxy_hostname %s", rhsm_proxy_hostname) proxy = "http://" if rhsm_proxy_user != "" and rhsm_proxy_pass != "": logger.debug("Found user and password for rhsm_proxy") proxy = proxy + rhsm_proxy_user + ":" + rhsm_proxy_pass + "@" proxy = proxy + rhsm_proxy_hostname + ':' + rhsm_proxy_port logger.debug("RHSM Proxy: %s", proxy) logger.debug("Found %sHost: %s, Port: %s", ('' if _is_rhn_or_rhsm(rhsm_hostname) else 'Satellite 6 Server '), rhsm_hostname, rhsm_hostport) rhsm_ca = rhsm_config.get('rhsm', 'repo_ca_cert') logger.debug("Found CA: %s", rhsm_ca) logger.debug("Setting authmethod to CERT") config.authmethod = 'CERT' # Directly connected to Red Hat, use cert auth directly with the api if _is_rhn_or_rhsm(rhsm_hostname): # URL changes. 
my favorite if config.legacy_upload: logger.debug("Connected to Red Hat Directly, using cert-api") rhsm_hostname = 'cert-api.access.redhat.com' else: logger.debug("Connected to Red Hat Directly, using cloud.redhat.com") rhsm_hostname = 'cloud.redhat.com' rhsm_ca = None else: # Set the host path # 'rhsm_hostname' should really be named ~ 'rhsm_host_base_url' rhsm_hostname = rhsm_hostname + ':' + rhsm_hostport + '/redhat_access' is_satellite = True logger.debug("Trying to set auto_configuration") set_auto_configuration(config, rhsm_hostname, rhsm_ca, proxy, is_satellite) return True except Exception as e: logger.debug(e) logger.debug('System is NOT subscription-manager registered') return False
[ "def", "_try_satellite6_configuration", "(", "config", ")", ":", "try", ":", "rhsm_config", "=", "_importInitConfig", "(", ")", "logger", ".", "debug", "(", "'Trying to autoconfigure...'", ")", "cert", "=", "open", "(", "rhsmCertificate", ".", "certpath", "(", "...
Try to autoconfigure for Satellite 6
[ "Try", "to", "autoconfigure", "for", "Satellite", "6" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/auto_config.py#L103-L169
train
220,886
RedHatInsights/insights-core
insights/client/auto_config.py
_try_satellite5_configuration
def _try_satellite5_configuration(config): """ Attempt to determine Satellite 5 Configuration """ logger.debug("Trying Satellite 5 auto_config") rhn_config = '/etc/sysconfig/rhn/up2date' systemid = '/etc/sysconfig/rhn/systemid' if os.path.isfile(rhn_config): if os.path.isfile(systemid): config.systemid = _read_systemid_file(systemid) else: logger.debug("Could not find Satellite 5 systemid file.") return False logger.debug("Found Satellite 5 Config") rhn_conf_file = open(rhn_config, 'r') hostname = None for line in rhn_conf_file: if line.startswith('serverURL='): url = urlparse(line.split('=')[1]) hostname = url.netloc + '/redhat_access' logger.debug("Found hostname %s", hostname) if line.startswith('sslCACert='): rhn_ca = line.strip().split('=')[1] # Auto discover proxy stuff if line.startswith('enableProxy='): proxy_enabled = line.strip().split('=')[1] if line.startswith('httpProxy='): proxy_host_port = line.strip().split('=')[1] if line.startswith('proxyUser='): proxy_user = line.strip().split('=')[1] if line.startswith('proxyPassword='): proxy_password = line.strip().split('=')[1] if hostname: proxy = None if proxy_enabled == "1": proxy = "http://" if proxy_user != "" and proxy_password != "": logger.debug("Found user and password for rhn_proxy") proxy = proxy + proxy_user + ':' + proxy_password proxy = proxy + "@" + proxy_host_port else: proxy = proxy + proxy_host_port logger.debug("RHN Proxy: %s", proxy) set_auto_configuration(config, hostname, rhn_ca, proxy) else: logger.debug("Could not find hostname") return False return True else: logger.debug("Could not find rhn config") return False
python
def _try_satellite5_configuration(config): """ Attempt to determine Satellite 5 Configuration """ logger.debug("Trying Satellite 5 auto_config") rhn_config = '/etc/sysconfig/rhn/up2date' systemid = '/etc/sysconfig/rhn/systemid' if os.path.isfile(rhn_config): if os.path.isfile(systemid): config.systemid = _read_systemid_file(systemid) else: logger.debug("Could not find Satellite 5 systemid file.") return False logger.debug("Found Satellite 5 Config") rhn_conf_file = open(rhn_config, 'r') hostname = None for line in rhn_conf_file: if line.startswith('serverURL='): url = urlparse(line.split('=')[1]) hostname = url.netloc + '/redhat_access' logger.debug("Found hostname %s", hostname) if line.startswith('sslCACert='): rhn_ca = line.strip().split('=')[1] # Auto discover proxy stuff if line.startswith('enableProxy='): proxy_enabled = line.strip().split('=')[1] if line.startswith('httpProxy='): proxy_host_port = line.strip().split('=')[1] if line.startswith('proxyUser='): proxy_user = line.strip().split('=')[1] if line.startswith('proxyPassword='): proxy_password = line.strip().split('=')[1] if hostname: proxy = None if proxy_enabled == "1": proxy = "http://" if proxy_user != "" and proxy_password != "": logger.debug("Found user and password for rhn_proxy") proxy = proxy + proxy_user + ':' + proxy_password proxy = proxy + "@" + proxy_host_port else: proxy = proxy + proxy_host_port logger.debug("RHN Proxy: %s", proxy) set_auto_configuration(config, hostname, rhn_ca, proxy) else: logger.debug("Could not find hostname") return False return True else: logger.debug("Could not find rhn config") return False
[ "def", "_try_satellite5_configuration", "(", "config", ")", ":", "logger", ".", "debug", "(", "\"Trying Satellite 5 auto_config\"", ")", "rhn_config", "=", "'/etc/sysconfig/rhn/up2date'", "systemid", "=", "'/etc/sysconfig/rhn/systemid'", "if", "os", ".", "path", ".", "i...
Attempt to determine Satellite 5 Configuration
[ "Attempt", "to", "determine", "Satellite", "5", "Configuration" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/auto_config.py#L178-L231
train
220,887
RedHatInsights/insights-core
insights/parsers/iptables.py
IPTablesConfiguration.get_chain
def get_chain(self, name, table="filter"): """ Get the list of rules for a particular chain. Chain order is kept intact. Args: name (str): chain name, e.g. `` table (str): table name, defaults to ``filter`` Returns: list: rules """ return [r for r in self.rules if r["table"] == table and r["chain"] == name]
python
def get_chain(self, name, table="filter"): """ Get the list of rules for a particular chain. Chain order is kept intact. Args: name (str): chain name, e.g. `` table (str): table name, defaults to ``filter`` Returns: list: rules """ return [r for r in self.rules if r["table"] == table and r["chain"] == name]
[ "def", "get_chain", "(", "self", ",", "name", ",", "table", "=", "\"filter\"", ")", ":", "return", "[", "r", "for", "r", "in", "self", ".", "rules", "if", "r", "[", "\"table\"", "]", "==", "table", "and", "r", "[", "\"chain\"", "]", "==", "name", ...
Get the list of rules for a particular chain. Chain order is kept intact. Args: name (str): chain name, e.g. `` table (str): table name, defaults to ``filter`` Returns: list: rules
[ "Get", "the", "list", "of", "rules", "for", "a", "particular", "chain", ".", "Chain", "order", "is", "kept", "intact", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/iptables.py#L127-L138
train
220,888
RedHatInsights/insights-core
insights/parsers/iptables.py
IPTablesConfiguration.table_chains
def table_chains(self, table="filter"): """ Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules """ return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))
python
def table_chains(self, table="filter"): """ Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules """ return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))
[ "def", "table_chains", "(", "self", ",", "table", "=", "\"filter\"", ")", ":", "return", "dict", "(", "(", "c", "[", "\"name\"", "]", ",", "self", ".", "get_chain", "(", "c", "[", "\"name\"", "]", ",", "table", ")", ")", "for", "c", "in", "self", ...
Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules
[ "Get", "a", "dict", "where", "the", "keys", "are", "all", "the", "chains", "for", "the", "given", "table", "and", "each", "value", "is", "the", "set", "of", "rules", "defined", "for", "the", "given", "chain", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/iptables.py#L152-L163
train
220,889
RedHatInsights/insights-core
insights/client/__init__.py
InsightsClient.verify
def verify(self, egg_path, gpg_key=constants.pub_gpg_path): """ Verifies the GPG signature of the egg. The signature is assumed to be in the same directory as the egg and named the same as the egg except with an additional ".asc" extension. returns (dict): {'gpg': if the egg checks out, 'stderr': error message if present, 'stdout': stdout, 'rc': return code} """ # check if the provided files (egg and gpg) actually exist if egg_path and not os.path.isfile(egg_path): the_message = "Provided egg path %s does not exist, cannot verify." % (egg_path) logger.debug(the_message) return {'gpg': False, 'stderr': the_message, 'stdout': the_message, 'rc': 1, 'message': the_message} if self.config.gpg and gpg_key and not os.path.isfile(gpg_key): the_message = ("Running in GPG mode but cannot find " "file %s to verify against." % (gpg_key)) logger.debug(the_message) return {'gpg': False, 'stderr': the_message, 'stdout': the_message, 'rc': 1, 'message': the_message} # if we are running in no_gpg or not gpg mode then return true if not self.config.gpg: return {'gpg': True, 'stderr': None, 'stdout': None, 'rc': 0} # if a valid egg path and gpg were received do the verification if egg_path and gpg_key: cmd_template = '/usr/bin/gpg --verify --keyring %s %s %s' cmd = cmd_template % (gpg_key, egg_path + '.asc', egg_path) logger.debug(cmd) process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() rc = process.returncode logger.debug("GPG return code: %s" % rc) return {'gpg': True if rc == 0 else False, 'stderr': stderr, 'stdout': stdout, 'rc': rc} else: return {'gpg': False, 'stderr': 'Must specify a valid core and gpg key.', 'stdout': 'Must specify a valid core and gpg key.', 'rc': 1}
python
def verify(self, egg_path, gpg_key=constants.pub_gpg_path): """ Verifies the GPG signature of the egg. The signature is assumed to be in the same directory as the egg and named the same as the egg except with an additional ".asc" extension. returns (dict): {'gpg': if the egg checks out, 'stderr': error message if present, 'stdout': stdout, 'rc': return code} """ # check if the provided files (egg and gpg) actually exist if egg_path and not os.path.isfile(egg_path): the_message = "Provided egg path %s does not exist, cannot verify." % (egg_path) logger.debug(the_message) return {'gpg': False, 'stderr': the_message, 'stdout': the_message, 'rc': 1, 'message': the_message} if self.config.gpg and gpg_key and not os.path.isfile(gpg_key): the_message = ("Running in GPG mode but cannot find " "file %s to verify against." % (gpg_key)) logger.debug(the_message) return {'gpg': False, 'stderr': the_message, 'stdout': the_message, 'rc': 1, 'message': the_message} # if we are running in no_gpg or not gpg mode then return true if not self.config.gpg: return {'gpg': True, 'stderr': None, 'stdout': None, 'rc': 0} # if a valid egg path and gpg were received do the verification if egg_path and gpg_key: cmd_template = '/usr/bin/gpg --verify --keyring %s %s %s' cmd = cmd_template % (gpg_key, egg_path + '.asc', egg_path) logger.debug(cmd) process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() rc = process.returncode logger.debug("GPG return code: %s" % rc) return {'gpg': True if rc == 0 else False, 'stderr': stderr, 'stdout': stdout, 'rc': rc} else: return {'gpg': False, 'stderr': 'Must specify a valid core and gpg key.', 'stdout': 'Must specify a valid core and gpg key.', 'rc': 1}
[ "def", "verify", "(", "self", ",", "egg_path", ",", "gpg_key", "=", "constants", ".", "pub_gpg_path", ")", ":", "# check if the provided files (egg and gpg) actually exist", "if", "egg_path", "and", "not", "os", ".", "path", ".", "isfile", "(", "egg_path", ")", ...
Verifies the GPG signature of the egg. The signature is assumed to be in the same directory as the egg and named the same as the egg except with an additional ".asc" extension. returns (dict): {'gpg': if the egg checks out, 'stderr': error message if present, 'stdout': stdout, 'rc': return code}
[ "Verifies", "the", "GPG", "signature", "of", "the", "egg", ".", "The", "signature", "is", "assumed", "to", "be", "in", "the", "same", "directory", "as", "the", "egg", "and", "named", "the", "same", "as", "the", "egg", "except", "with", "an", "additional"...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L213-L267
train
220,890
RedHatInsights/insights-core
insights/client/__init__.py
InsightsClient.get_diagnosis
def get_diagnosis(self, remediation_id=None): ''' returns JSON of diagnosis data on success, None on failure Optional arg remediation_id to get a particular remediation set. ''' if self.config.offline: logger.error('Cannot get diagnosis in offline mode.') return None return self.connection.get_diagnosis(remediation_id)
python
def get_diagnosis(self, remediation_id=None): ''' returns JSON of diagnosis data on success, None on failure Optional arg remediation_id to get a particular remediation set. ''' if self.config.offline: logger.error('Cannot get diagnosis in offline mode.') return None return self.connection.get_diagnosis(remediation_id)
[ "def", "get_diagnosis", "(", "self", ",", "remediation_id", "=", "None", ")", ":", "if", "self", ".", "config", ".", "offline", ":", "logger", ".", "error", "(", "'Cannot get diagnosis in offline mode.'", ")", "return", "None", "return", "self", ".", "connecti...
returns JSON of diagnosis data on success, None on failure Optional arg remediation_id to get a particular remediation set.
[ "returns", "JSON", "of", "diagnosis", "data", "on", "success", "None", "on", "failure", "Optional", "arg", "remediation_id", "to", "get", "a", "particular", "remediation", "set", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L455-L463
train
220,891
RedHatInsights/insights-core
insights/client/__init__.py
InsightsClient.delete_cached_branch_info
def delete_cached_branch_info(self): ''' Deletes cached branch_info file ''' if os.path.isfile(constants.cached_branch_info): logger.debug('Deleting cached branch_info file...') os.remove(constants.cached_branch_info) else: logger.debug('Cached branch_info file does not exist.')
python
def delete_cached_branch_info(self): ''' Deletes cached branch_info file ''' if os.path.isfile(constants.cached_branch_info): logger.debug('Deleting cached branch_info file...') os.remove(constants.cached_branch_info) else: logger.debug('Cached branch_info file does not exist.')
[ "def", "delete_cached_branch_info", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "constants", ".", "cached_branch_info", ")", ":", "logger", ".", "debug", "(", "'Deleting cached branch_info file...'", ")", "os", ".", "remove", "(", "cons...
Deletes cached branch_info file
[ "Deletes", "cached", "branch_info", "file" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L465-L473
train
220,892
RedHatInsights/insights-core
insights/client/__init__.py
InsightsClient.clear_local_registration
def clear_local_registration(self): ''' Deletes dotfiles and machine-id for fresh registration ''' delete_registered_file() delete_unregistered_file() write_to_disk(constants.machine_id_file, delete=True) logger.debug('Re-register set, forcing registration.') logger.debug('New machine-id: %s', generate_machine_id(new=True))
python
def clear_local_registration(self): ''' Deletes dotfiles and machine-id for fresh registration ''' delete_registered_file() delete_unregistered_file() write_to_disk(constants.machine_id_file, delete=True) logger.debug('Re-register set, forcing registration.') logger.debug('New machine-id: %s', generate_machine_id(new=True))
[ "def", "clear_local_registration", "(", "self", ")", ":", "delete_registered_file", "(", ")", "delete_unregistered_file", "(", ")", "write_to_disk", "(", "constants", ".", "machine_id_file", ",", "delete", "=", "True", ")", "logger", ".", "debug", "(", "'Re-regist...
Deletes dotfiles and machine-id for fresh registration
[ "Deletes", "dotfiles", "and", "machine", "-", "id", "for", "fresh", "registration" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/__init__.py#L478-L486
train
220,893
RedHatInsights/insights-core
insights/contrib/pyparsing.py
col
def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ s = strg return 1 if loc<len(s) and s[loc] == '\n' else loc - s.rfind("\n", 0, loc)
python
def col (loc,strg): """Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string. """ s = strg return 1 if loc<len(s) and s[loc] == '\n' else loc - s.rfind("\n", 0, loc)
[ "def", "col", "(", "loc", ",", "strg", ")", ":", "s", "=", "strg", "return", "1", "if", "loc", "<", "len", "(", "s", ")", "and", "s", "[", "loc", "]", "==", "'\\n'", "else", "loc", "-", "s", ".", "rfind", "(", "\"\\n\"", ",", "0", ",", "loc...
Returns current column within a string, counting newlines as line separators. The first column is number 1. Note: the default parsing behavior is to expand tabs in the input string before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information on parsing strings containing C{<TAB>}s, and suggested methods to maintain a consistent view of the parsed string, the parse location, and line and column positions within the parsed string.
[ "Returns", "current", "column", "within", "a", "string", "counting", "newlines", "as", "line", "separators", ".", "The", "first", "column", "is", "number", "1", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/pyparsing.py#L716-L727
train
220,894
RedHatInsights/insights-core
insights/contrib/pyparsing.py
upcaseTokens
def upcaseTokens(s,l,t): """Helper parse action to convert tokens to upper case.""" return [ tt.upper() for tt in map(_ustr,t) ]
python
def upcaseTokens(s,l,t): """Helper parse action to convert tokens to upper case.""" return [ tt.upper() for tt in map(_ustr,t) ]
[ "def", "upcaseTokens", "(", "s", ",", "l", ",", "t", ")", ":", "return", "[", "tt", ".", "upper", "(", ")", "for", "tt", "in", "map", "(", "_ustr", ",", "t", ")", "]" ]
Helper parse action to convert tokens to upper case.
[ "Helper", "parse", "action", "to", "convert", "tokens", "to", "upper", "case", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/pyparsing.py#L3592-L3594
train
220,895
RedHatInsights/insights-core
insights/contrib/pyparsing.py
downcaseTokens
def downcaseTokens(s,l,t): """Helper parse action to convert tokens to lower case.""" return [ tt.lower() for tt in map(_ustr,t) ]
python
def downcaseTokens(s,l,t): """Helper parse action to convert tokens to lower case.""" return [ tt.lower() for tt in map(_ustr,t) ]
[ "def", "downcaseTokens", "(", "s", ",", "l", ",", "t", ")", ":", "return", "[", "tt", ".", "lower", "(", ")", "for", "tt", "in", "map", "(", "_ustr", ",", "t", ")", "]" ]
Helper parse action to convert tokens to lower case.
[ "Helper", "parse", "action", "to", "convert", "tokens", "to", "lower", "case", "." ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/pyparsing.py#L3596-L3598
train
220,896
RedHatInsights/insights-core
insights/contrib/pyparsing.py
infixNotation
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
- parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=Suppress('(')) - rpar - expression for matching right-parentheses (default=Suppress(')')) """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must 
be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: matchExpr.setParseAction( pa ) thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret
python
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
- parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=Suppress('(')) - rpar - expression for matching right-parentheses (default=Suppress(')')) """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) else: matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) else: raise ValueError("operator must be unary (1), binary (2), or ternary (3)") elif rightLeftAssoc == opAssoc.RIGHT: if arity == 1: # try to avoid LR with this extra test if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) elif arity == 2: if opExpr is not None: matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) else: matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) elif arity == 3: matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) else: raise ValueError("operator must 
be unary (1), binary (2), or ternary (3)") else: raise ValueError("operator must indicate right or left associativity") if pa: matchExpr.setParseAction( pa ) thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret
[ "def", "infixNotation", "(", "baseExpr", ",", "opList", ",", "lpar", "=", "Suppress", "(", "'('", ")", ",", "rpar", "=", "Suppress", "(", "')'", ")", ")", ":", "ret", "=", "Forward", "(", ")", "lastExpr", "=", "baseExpr", "|", "(", "lpar", "+", "re...
Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or binary, left- or right-associative. Parse actions can also be attached to operator expressions. Parameters: - baseExpr - expression representing the most basic element for the nested - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal; if numTerms is 3, opExpr is a tuple of two expressions, for the two operators separating the 3 terms - numTerms is the number of terms for this operator (must be 1, 2, or 3) - rightLeftAssoc is the indicator whether the operator is right or left associative, using the pyparsing-defined constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - parseAction is the parse action to be associated with expressions matching this operator expression (the parse action tuple member may be omitted) - lpar - expression for matching left-parentheses (default=Suppress('(')) - rpar - expression for matching right-parentheses (default=Suppress(')'))
[ "Helper", "method", "for", "constructing", "grammars", "of", "expressions", "made", "up", "of", "operators", "working", "in", "a", "precedence", "hierarchy", ".", "Operators", "may", "be", "unary", "or", "binary", "left", "-", "or", "right", "-", "associative"...
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/pyparsing.py#L3683-L3755
train
220,897
RedHatInsights/insights-core
examples/cluster_rules/ntp_compare.py
report
def report(shas, meta): """ Cluster rule to compare ntp.conf files across a cluster ``shas`` is a Pandas DataFrame for the facts for each host by the fact ``ntp_sha256``. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. ``meta`` is a dictionary that contains the information from the cluster topology file provided by the ``-i`` switch. The dictionary keys are the sections, and the values are a list of the host information provided in the toplolgy file. Arguments: shas (pandas.DataFrame): Includes facts from ``ntp_sha256`` fact with column "sha" and one row per host in the cluster. meta (dict): Keys are the sections in the topology file and values are a list of the values in the section. """ num_members = meta.num_members uniq = shas.sha.unique() if len(shas) != num_members or len(uniq) != 1: return make_fail("DISTINCT_NTP_CONFS", confs=len(uniq), nodes=num_members) return make_pass("MATCHING_NTP_CONFS", nodes=meta['nodes'], servers=meta['servers'])
python
def report(shas, meta): """ Cluster rule to compare ntp.conf files across a cluster ``shas`` is a Pandas DataFrame for the facts for each host by the fact ``ntp_sha256``. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. ``meta`` is a dictionary that contains the information from the cluster topology file provided by the ``-i`` switch. The dictionary keys are the sections, and the values are a list of the host information provided in the toplolgy file. Arguments: shas (pandas.DataFrame): Includes facts from ``ntp_sha256`` fact with column "sha" and one row per host in the cluster. meta (dict): Keys are the sections in the topology file and values are a list of the values in the section. """ num_members = meta.num_members uniq = shas.sha.unique() if len(shas) != num_members or len(uniq) != 1: return make_fail("DISTINCT_NTP_CONFS", confs=len(uniq), nodes=num_members) return make_pass("MATCHING_NTP_CONFS", nodes=meta['nodes'], servers=meta['servers'])
[ "def", "report", "(", "shas", ",", "meta", ")", ":", "num_members", "=", "meta", ".", "num_members", "uniq", "=", "shas", ".", "sha", ".", "unique", "(", ")", "if", "len", "(", "shas", ")", "!=", "num_members", "or", "len", "(", "uniq", ")", "!=", ...
Cluster rule to compare ntp.conf files across a cluster ``shas`` is a Pandas DataFrame for the facts for each host by the fact ``ntp_sha256``. See https://pandas.pydata.org/pandas-docs/stable/api.html#dataframe for information on available attributes and methods. ``meta`` is a dictionary that contains the information from the cluster topology file provided by the ``-i`` switch. The dictionary keys are the sections, and the values are a list of the host information provided in the toplolgy file. Arguments: shas (pandas.DataFrame): Includes facts from ``ntp_sha256`` fact with column "sha" and one row per host in the cluster. meta (dict): Keys are the sections in the topology file and values are a list of the values in the section.
[ "Cluster", "rule", "to", "compare", "ntp", ".", "conf", "files", "across", "a", "cluster" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/examples/cluster_rules/ntp_compare.py#L50-L75
train
220,898
RedHatInsights/insights-core
insights/contrib/soscleaner.py
SOSCleaner._create_ip_report
def _create_ip_report(self): ''' this will take the obfuscated ip and hostname databases and output csv files ''' try: ip_report_name = os.path.join(self.report_dir, "%s-ip.csv" % self.session) self.logger.con_out('Creating IP Report - %s', ip_report_name) ip_report = open(ip_report_name, 'wt') ip_report.write('Obfuscated IP,Original IP\n') for k,v in self.ip_db.items(): ip_report.write('%s,%s\n' %(self._int2ip(k),self._int2ip(v))) ip_report.close() self.logger.info('Completed IP Report') self.ip_report = ip_report_name except Exception as e: # pragma: no cover self.logger.exception(e) raise Exception('CreateReport Error: Error Creating IP Report')
python
def _create_ip_report(self): ''' this will take the obfuscated ip and hostname databases and output csv files ''' try: ip_report_name = os.path.join(self.report_dir, "%s-ip.csv" % self.session) self.logger.con_out('Creating IP Report - %s', ip_report_name) ip_report = open(ip_report_name, 'wt') ip_report.write('Obfuscated IP,Original IP\n') for k,v in self.ip_db.items(): ip_report.write('%s,%s\n' %(self._int2ip(k),self._int2ip(v))) ip_report.close() self.logger.info('Completed IP Report') self.ip_report = ip_report_name except Exception as e: # pragma: no cover self.logger.exception(e) raise Exception('CreateReport Error: Error Creating IP Report')
[ "def", "_create_ip_report", "(", "self", ")", ":", "try", ":", "ip_report_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "report_dir", ",", "\"%s-ip.csv\"", "%", "self", ".", "session", ")", "self", ".", "logger", ".", "con_out", "(", "'...
this will take the obfuscated ip and hostname databases and output csv files
[ "this", "will", "take", "the", "obfuscated", "ip", "and", "hostname", "databases", "and", "output", "csv", "files" ]
b57cbf8ed7c089672426ede0441e0a4f789ef4a1
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L214-L231
train
220,899