repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._create_archive | def _create_archive(self):
'''This will create a tar.gz compressed archive of the scrubbed directory'''
try:
self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session)
self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path)
t = tarfile.open(self.archive_path, 'w:gz')
for dirpath, dirnames, filenames in os.walk(self.dir_path):
for f in filenames:
f_full = os.path.join(dirpath, f)
f_archive = f_full.replace(self.report_dir,'')
self.logger.debug('adding %s to %s archive', f_archive, self.archive_path)
t.add(f_full, arcname=f_archive)
except Exception as e: #pragma: no cover
self.logger.exception(e)
raise Exception('CreateArchiveError: Unable to create Archive')
self._clean_up()
self.logger.info('Archiving Complete')
self.logger.con_out('SOSCleaner Complete')
if not self.quiet: # pragma: no cover
t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,''))
t.close() | python | def _create_archive(self):
'''This will create a tar.gz compressed archive of the scrubbed directory'''
try:
self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session)
self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path)
t = tarfile.open(self.archive_path, 'w:gz')
for dirpath, dirnames, filenames in os.walk(self.dir_path):
for f in filenames:
f_full = os.path.join(dirpath, f)
f_archive = f_full.replace(self.report_dir,'')
self.logger.debug('adding %s to %s archive', f_archive, self.archive_path)
t.add(f_full, arcname=f_archive)
except Exception as e: #pragma: no cover
self.logger.exception(e)
raise Exception('CreateArchiveError: Unable to create Archive')
self._clean_up()
self.logger.info('Archiving Complete')
self.logger.con_out('SOSCleaner Complete')
if not self.quiet: # pragma: no cover
t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,''))
t.close() | [
"def",
"_create_archive",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"archive_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"report_dir",
",",
"\"%s.tar.gz\"",
"%",
"self",
".",
"session",
")",
"self",
".",
"logger",
".",
"con_out"... | This will create a tar.gz compressed archive of the scrubbed directory | [
"This",
"will",
"create",
"a",
"tar",
".",
"gz",
"compressed",
"archive",
"of",
"the",
"scrubbed",
"directory"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L313-L334 | train | 220,900 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._clean_up | def _clean_up(self):
'''This will clean up origin directories, etc.'''
self.logger.info('Beginning Clean Up Process')
try:
if self.origin_path:
self.logger.info('Removing Origin Directory - %s', self.origin_path)
shutil.rmtree(self.origin_path)
self.logger.info('Removing Working Directory - %s', self.dir_path)
shutil.rmtree(self.dir_path)
self.logger.info('Clean Up Process Complete')
except Exception as e: #pragma: no cover
self.logger.exception(e) | python | def _clean_up(self):
'''This will clean up origin directories, etc.'''
self.logger.info('Beginning Clean Up Process')
try:
if self.origin_path:
self.logger.info('Removing Origin Directory - %s', self.origin_path)
shutil.rmtree(self.origin_path)
self.logger.info('Removing Working Directory - %s', self.dir_path)
shutil.rmtree(self.dir_path)
self.logger.info('Clean Up Process Complete')
except Exception as e: #pragma: no cover
self.logger.exception(e) | [
"def",
"_clean_up",
"(",
"self",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Beginning Clean Up Process'",
")",
"try",
":",
"if",
"self",
".",
"origin_path",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Removing Origin Directory - %s'",
",",
"sel... | This will clean up origin directories, etc. | [
"This",
"will",
"clean",
"up",
"origin",
"directories",
"etc",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L336-L347 | train | 220,901 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._hn2db | def _hn2db(self, hn):
'''
This will add a hostname for a hostname for an included domain or return an existing entry
'''
db = self.hn_db
hn_found = False
for k,v in db.items():
if v == hn: #the hostname is in the database
ret_hn = k
hn_found = True
if hn_found:
return ret_hn
else:
self.hostname_count += 1 #we have a new hostname, so we increment the counter to get the host ID number
o_domain = self.root_domain
for od,d in self.dn_db.items():
if d in hn:
o_domain = od
new_hn = "host%s.%s" % (self.hostname_count, o_domain)
self.hn_db[new_hn] = hn
return new_hn | python | def _hn2db(self, hn):
'''
This will add a hostname for a hostname for an included domain or return an existing entry
'''
db = self.hn_db
hn_found = False
for k,v in db.items():
if v == hn: #the hostname is in the database
ret_hn = k
hn_found = True
if hn_found:
return ret_hn
else:
self.hostname_count += 1 #we have a new hostname, so we increment the counter to get the host ID number
o_domain = self.root_domain
for od,d in self.dn_db.items():
if d in hn:
o_domain = od
new_hn = "host%s.%s" % (self.hostname_count, o_domain)
self.hn_db[new_hn] = hn
return new_hn | [
"def",
"_hn2db",
"(",
"self",
",",
"hn",
")",
":",
"db",
"=",
"self",
".",
"hn_db",
"hn_found",
"=",
"False",
"for",
"k",
",",
"v",
"in",
"db",
".",
"items",
"(",
")",
":",
"if",
"v",
"==",
"hn",
":",
"#the hostname is in the database",
"ret_hn",
"... | This will add a hostname for a hostname for an included domain or return an existing entry | [
"This",
"will",
"add",
"a",
"hostname",
"for",
"a",
"hostname",
"for",
"an",
"included",
"domain",
"or",
"return",
"an",
"existing",
"entry"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L510-L531 | train | 220,902 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._file_list | def _file_list(self, folder):
'''returns a list of file names in an sosreport directory'''
rtn = []
walk = self._walk_report(folder)
for key,val in walk.items():
for v in val:
x=os.path.join(key,v)
rtn.append(x)
self.file_count = len(rtn) #a count of the files we'll have in the final cleaned sosreport, for reporting
return rtn | python | def _file_list(self, folder):
'''returns a list of file names in an sosreport directory'''
rtn = []
walk = self._walk_report(folder)
for key,val in walk.items():
for v in val:
x=os.path.join(key,v)
rtn.append(x)
self.file_count = len(rtn) #a count of the files we'll have in the final cleaned sosreport, for reporting
return rtn | [
"def",
"_file_list",
"(",
"self",
",",
"folder",
")",
":",
"rtn",
"=",
"[",
"]",
"walk",
"=",
"self",
".",
"_walk_report",
"(",
"folder",
")",
"for",
"key",
",",
"val",
"in",
"walk",
".",
"items",
"(",
")",
":",
"for",
"v",
"in",
"val",
":",
"x... | returns a list of file names in an sosreport directory | [
"returns",
"a",
"list",
"of",
"file",
"names",
"in",
"an",
"sosreport",
"directory"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L549-L559 | train | 220,903 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._clean_line | def _clean_line(self, l):
'''this will return a line with obfuscations for all possible variables, hostname, ip, etc.'''
new_line = self._sub_ip(l) # IP substitution
new_line = self._sub_hostname(new_line) # Hostname substitution
new_line = self._sub_keywords(new_line) # Keyword Substitution
return new_line | python | def _clean_line(self, l):
'''this will return a line with obfuscations for all possible variables, hostname, ip, etc.'''
new_line = self._sub_ip(l) # IP substitution
new_line = self._sub_hostname(new_line) # Hostname substitution
new_line = self._sub_keywords(new_line) # Keyword Substitution
return new_line | [
"def",
"_clean_line",
"(",
"self",
",",
"l",
")",
":",
"new_line",
"=",
"self",
".",
"_sub_ip",
"(",
"l",
")",
"# IP substitution",
"new_line",
"=",
"self",
".",
"_sub_hostname",
"(",
"new_line",
")",
"# Hostname substitution",
"new_line",
"=",
"self",
".",
... | this will return a line with obfuscations for all possible variables, hostname, ip, etc. | [
"this",
"will",
"return",
"a",
"line",
"with",
"obfuscations",
"for",
"all",
"possible",
"variables",
"hostname",
"ip",
"etc",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L561-L568 | train | 220,904 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._clean_file | def _clean_file(self, f):
'''this will take a given file path, scrub it accordingly, and save a new copy of the file
in the same location'''
if os.path.exists(f) and not os.path.islink(f):
tmp_file = tempfile.TemporaryFile(mode='w+b')
try:
fh = open(f, 'r')
data = fh.readlines()
fh.close()
if len(data) > 0: #if the file isn't empty:
for l in data:
new_l = self._clean_line(l)
tmp_file.write(new_l.encode('utf-8'))
tmp_file.seek(0)
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("CleanFile Error: Cannot Open File For Reading - %s" % f)
try:
if len(data) > 0:
new_fh = open(f, 'wb')
for line in tmp_file:
new_fh.write(line)
new_fh.close()
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("CleanFile Error: Cannot Write to New File - %s" % f)
finally:
tmp_file.close() | python | def _clean_file(self, f):
'''this will take a given file path, scrub it accordingly, and save a new copy of the file
in the same location'''
if os.path.exists(f) and not os.path.islink(f):
tmp_file = tempfile.TemporaryFile(mode='w+b')
try:
fh = open(f, 'r')
data = fh.readlines()
fh.close()
if len(data) > 0: #if the file isn't empty:
for l in data:
new_l = self._clean_line(l)
tmp_file.write(new_l.encode('utf-8'))
tmp_file.seek(0)
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("CleanFile Error: Cannot Open File For Reading - %s" % f)
try:
if len(data) > 0:
new_fh = open(f, 'wb')
for line in tmp_file:
new_fh.write(line)
new_fh.close()
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("CleanFile Error: Cannot Write to New File - %s" % f)
finally:
tmp_file.close() | [
"def",
"_clean_file",
"(",
"self",
",",
"f",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
"and",
"not",
"os",
".",
"path",
".",
"islink",
"(",
"f",
")",
":",
"tmp_file",
"=",
"tempfile",
".",
"TemporaryFile",
"(",
"mode",
"=",... | this will take a given file path, scrub it accordingly, and save a new copy of the file
in the same location | [
"this",
"will",
"take",
"a",
"given",
"file",
"path",
"scrub",
"it",
"accordingly",
"and",
"save",
"a",
"new",
"copy",
"of",
"the",
"file",
"in",
"the",
"same",
"location"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L570-L601 | train | 220,905 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._add_extra_files | def _add_extra_files(self, files):
'''if extra files are to be analyzed with an sosreport, this will add them to the origin path to be analyzed'''
try:
for f in files:
self.logger.con_out("adding additional file for analysis: %s" % f)
fname = os.path.basename(f)
f_new = os.path.join(self.dir_path, fname)
shutil.copyfile(f,f_new)
except IOError as e:
self.logger.con_out("ExtraFileError: %s is not readable or does not exist. Skipping File" % f)
self.logger.exception(e)
pass
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("ExtraFileError: Unable to Process Extra File - %s" % f) | python | def _add_extra_files(self, files):
'''if extra files are to be analyzed with an sosreport, this will add them to the origin path to be analyzed'''
try:
for f in files:
self.logger.con_out("adding additional file for analysis: %s" % f)
fname = os.path.basename(f)
f_new = os.path.join(self.dir_path, fname)
shutil.copyfile(f,f_new)
except IOError as e:
self.logger.con_out("ExtraFileError: %s is not readable or does not exist. Skipping File" % f)
self.logger.exception(e)
pass
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("ExtraFileError: Unable to Process Extra File - %s" % f) | [
"def",
"_add_extra_files",
"(",
"self",
",",
"files",
")",
":",
"try",
":",
"for",
"f",
"in",
"files",
":",
"self",
".",
"logger",
".",
"con_out",
"(",
"\"adding additional file for analysis: %s\"",
"%",
"f",
")",
"fname",
"=",
"os",
".",
"path",
".",
"b... | if extra files are to be analyzed with an sosreport, this will add them to the origin path to be analyzed | [
"if",
"extra",
"files",
"are",
"to",
"be",
"analyzed",
"with",
"an",
"sosreport",
"this",
"will",
"add",
"them",
"to",
"the",
"origin",
"path",
"to",
"be",
"analyzed"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L603-L618 | train | 220,906 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner._clean_files_only | def _clean_files_only(self, files):
''' if a user only wants to process one or more specific files, instead of a full sosreport '''
try:
if not (os.path.exists(self.origin_path)):
self.logger.info("Creating Origin Path - %s" % self.origin_path)
os.makedirs(self.origin_path) # create the origin_path directory
if not (os.path.exists(self.dir_path)):
self.logger.info("Creating Directory Path - %s" % self.dir_path)
os.makedirs(self.dir_path) # create the dir_path directory
self._add_extra_files(files)
except OSError as e: # pragma: no cover
if e.errno == errno.EEXIST:
pass
else: # pragma: no cover
self.logger.exception(e)
raise e
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("CleanFilesOnlyError: unable to process") | python | def _clean_files_only(self, files):
''' if a user only wants to process one or more specific files, instead of a full sosreport '''
try:
if not (os.path.exists(self.origin_path)):
self.logger.info("Creating Origin Path - %s" % self.origin_path)
os.makedirs(self.origin_path) # create the origin_path directory
if not (os.path.exists(self.dir_path)):
self.logger.info("Creating Directory Path - %s" % self.dir_path)
os.makedirs(self.dir_path) # create the dir_path directory
self._add_extra_files(files)
except OSError as e: # pragma: no cover
if e.errno == errno.EEXIST:
pass
else: # pragma: no cover
self.logger.exception(e)
raise e
except Exception as e: # pragma: no cover
self.logger.exception(e)
raise Exception("CleanFilesOnlyError: unable to process") | [
"def",
"_clean_files_only",
"(",
"self",
",",
"files",
")",
":",
"try",
":",
"if",
"not",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"origin_path",
")",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Creating Origin Path - %s\"",
"... | if a user only wants to process one or more specific files, instead of a full sosreport | [
"if",
"a",
"user",
"only",
"wants",
"to",
"process",
"one",
"or",
"more",
"specific",
"files",
"instead",
"of",
"a",
"full",
"sosreport"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L620-L640 | train | 220,907 |
RedHatInsights/insights-core | insights/contrib/soscleaner.py | SOSCleaner.clean_report | def clean_report(self, options, sosreport): # pragma: no cover
'''this is the primary function, to put everything together and analyze an sosreport'''
if options.report_dir: # override the default location for artifacts (/tmp)
if os.path.isdir(options.report_dir):
self.report_dir = options.report_dir
self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment()
self._start_logging(self.logfile)
self._get_disclaimer()
if options.domains:
self.domains = options.domains
if options.keywords:
self.keywords = options.keywords
self._keywords2db()
if not sosreport:
if not options.files:
raise Exception("Error: You must supply either an sosreport and/or files to process")
self.logger.con_out("No sosreport supplied. Only processing specific files")
self._clean_files_only(options.files)
else: # we DO have an sosreport to analyze
self.report = self._extract_sosreport(sosreport)
self._make_dest_env() # create the working directory
if options.hostname_path:
self.hostname, self.domainname = self._get_hostname(options.hostname_path)
else:
self.hostname, self.domainname = self._get_hostname()
if options.files:
self._add_extra_files(options.files)
if self.hostname: # if we have a hostname that's not a None type
self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later
self._process_hosts_file() # we'll take a dig through the hosts file and make sure it is as scrubbed as possible
self._domains2db()
files = self._file_list(self.dir_path)
self.logger.con_out("IP Obfuscation Start Address - %s", self.start_ip)
self.logger.con_out("*** SOSCleaner Processing ***")
self.logger.info("Working Directory - %s", self.dir_path)
for f in files:
self.logger.debug("Cleaning %s", f)
self._clean_file(f)
self.logger.con_out("*** SOSCleaner Statistics ***")
self.logger.con_out("IP Addresses Obfuscated - %s", len(self.ip_db))
self.logger.con_out("Hostnames Obfuscated - %s" , len(self.hn_db))
self.logger.con_out("Domains Obfuscated - %s" , len(self.dn_db))
self.logger.con_out("Total Files Analyzed - %s", self.file_count)
self.logger.con_out("*** SOSCleaner Artifacts ***")
self._create_reports()
self._create_archive()
return_data = [self.archive_path, self.logfile, self.ip_report]
if self.hostname:
return_data.append(self.hn_report)
if len(self.dn_db) >= 1:
return_data.append(self.dn_report)
return return_data | python | def clean_report(self, options, sosreport): # pragma: no cover
'''this is the primary function, to put everything together and analyze an sosreport'''
if options.report_dir: # override the default location for artifacts (/tmp)
if os.path.isdir(options.report_dir):
self.report_dir = options.report_dir
self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment()
self._start_logging(self.logfile)
self._get_disclaimer()
if options.domains:
self.domains = options.domains
if options.keywords:
self.keywords = options.keywords
self._keywords2db()
if not sosreport:
if not options.files:
raise Exception("Error: You must supply either an sosreport and/or files to process")
self.logger.con_out("No sosreport supplied. Only processing specific files")
self._clean_files_only(options.files)
else: # we DO have an sosreport to analyze
self.report = self._extract_sosreport(sosreport)
self._make_dest_env() # create the working directory
if options.hostname_path:
self.hostname, self.domainname = self._get_hostname(options.hostname_path)
else:
self.hostname, self.domainname = self._get_hostname()
if options.files:
self._add_extra_files(options.files)
if self.hostname: # if we have a hostname that's not a None type
self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later
self._process_hosts_file() # we'll take a dig through the hosts file and make sure it is as scrubbed as possible
self._domains2db()
files = self._file_list(self.dir_path)
self.logger.con_out("IP Obfuscation Start Address - %s", self.start_ip)
self.logger.con_out("*** SOSCleaner Processing ***")
self.logger.info("Working Directory - %s", self.dir_path)
for f in files:
self.logger.debug("Cleaning %s", f)
self._clean_file(f)
self.logger.con_out("*** SOSCleaner Statistics ***")
self.logger.con_out("IP Addresses Obfuscated - %s", len(self.ip_db))
self.logger.con_out("Hostnames Obfuscated - %s" , len(self.hn_db))
self.logger.con_out("Domains Obfuscated - %s" , len(self.dn_db))
self.logger.con_out("Total Files Analyzed - %s", self.file_count)
self.logger.con_out("*** SOSCleaner Artifacts ***")
self._create_reports()
self._create_archive()
return_data = [self.archive_path, self.logfile, self.ip_report]
if self.hostname:
return_data.append(self.hn_report)
if len(self.dn_db) >= 1:
return_data.append(self.dn_report)
return return_data | [
"def",
"clean_report",
"(",
"self",
",",
"options",
",",
"sosreport",
")",
":",
"# pragma: no cover",
"if",
"options",
".",
"report_dir",
":",
"# override the default location for artifacts (/tmp)",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"options",
".",
"repo... | this is the primary function, to put everything together and analyze an sosreport | [
"this",
"is",
"the",
"primary",
"function",
"to",
"put",
"everything",
"together",
"and",
"analyze",
"an",
"sosreport"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/soscleaner.py#L642-L702 | train | 220,908 |
RedHatInsights/insights-core | insights/parsers/docker_list.py | DockerList.parse_content | def parse_content(self, content):
"""
Parse the lines given into a list of dictionaries for each row. This
is stored in the ``rows`` attribute.
If the ``key_field`` property is set, use this to key a ``data``
dictionary attribute.
"""
self.rows = []
if len(content) < 2:
self.no_data = True
return
# Parse header, remembering column numbers for data capture. We use
# a finditer to get the positions, and we find by field rather than
# splitting on three or more spaces because of this.
headers = []
field_re = re.compile(r'\w+(\s\w+)*')
for match in field_re.finditer(content[0]):
headers.append({'name': match.group(), 'start': match.start()})
# Parse the rest of the line. Each field starts at the column
# given by the header and ends with at least three spaces.
for line in content[1:]:
# I think the dictionary comprehension version of this is too
# complicated for words :-)
row = {}
for header in headers:
value = line[header['start']:].split(' ', 1)[0]
if value == '':
value = None
row[header['name']] = value
self.rows.append(row)
# If we have a key_field set, construct a data dictionary on it.
# Note that duplicates will be overwritten, but we ignore '<none>'.
if self.key_field and self.key_field in self.rows[0]:
self.data = {}
for row in self.rows:
k = row[self.key_field]
if k is not None and k != '<none>':
self.data[k] = row | python | def parse_content(self, content):
"""
Parse the lines given into a list of dictionaries for each row. This
is stored in the ``rows`` attribute.
If the ``key_field`` property is set, use this to key a ``data``
dictionary attribute.
"""
self.rows = []
if len(content) < 2:
self.no_data = True
return
# Parse header, remembering column numbers for data capture. We use
# a finditer to get the positions, and we find by field rather than
# splitting on three or more spaces because of this.
headers = []
field_re = re.compile(r'\w+(\s\w+)*')
for match in field_re.finditer(content[0]):
headers.append({'name': match.group(), 'start': match.start()})
# Parse the rest of the line. Each field starts at the column
# given by the header and ends with at least three spaces.
for line in content[1:]:
# I think the dictionary comprehension version of this is too
# complicated for words :-)
row = {}
for header in headers:
value = line[header['start']:].split(' ', 1)[0]
if value == '':
value = None
row[header['name']] = value
self.rows.append(row)
# If we have a key_field set, construct a data dictionary on it.
# Note that duplicates will be overwritten, but we ignore '<none>'.
if self.key_field and self.key_field in self.rows[0]:
self.data = {}
for row in self.rows:
k = row[self.key_field]
if k is not None and k != '<none>':
self.data[k] = row | [
"def",
"parse_content",
"(",
"self",
",",
"content",
")",
":",
"self",
".",
"rows",
"=",
"[",
"]",
"if",
"len",
"(",
"content",
")",
"<",
"2",
":",
"self",
".",
"no_data",
"=",
"True",
"return",
"# Parse header, remembering column numbers for data capture. We... | Parse the lines given into a list of dictionaries for each row. This
is stored in the ``rows`` attribute.
If the ``key_field`` property is set, use this to key a ``data``
dictionary attribute. | [
"Parse",
"the",
"lines",
"given",
"into",
"a",
"list",
"of",
"dictionaries",
"for",
"each",
"row",
".",
"This",
"is",
"stored",
"in",
"the",
"rows",
"attribute",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/docker_list.py#L78-L119 | train | 220,909 |
RedHatInsights/insights-core | insights/contrib/ipaddress.py | _IPAddressBase._ip_int_from_prefix | def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if prefixlen is None:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen) | python | def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if prefixlen is None:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen) | [
"def",
"_ip_int_from_prefix",
"(",
"self",
",",
"prefixlen",
"=",
"None",
")",
":",
"if",
"prefixlen",
"is",
"None",
":",
"prefixlen",
"=",
"self",
".",
"_prefixlen",
"return",
"self",
".",
"_ALL_ONES",
"^",
"(",
"self",
".",
"_ALL_ONES",
">>",
"prefixlen"... | Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer. | [
"Turn",
"the",
"prefix",
"length",
"netmask",
"into",
"a",
"int",
"for",
"comparison",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/ipaddress.py#L530-L542 | train | 220,910 |
RedHatInsights/insights-core | insights/contrib/ipaddress.py | _IPAddressBase._ip_string_from_prefix | def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen)) | python | def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if not prefixlen:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen)) | [
"def",
"_ip_string_from_prefix",
"(",
"self",
",",
"prefixlen",
"=",
"None",
")",
":",
"if",
"not",
"prefixlen",
":",
"prefixlen",
"=",
"self",
".",
"_prefixlen",
"return",
"self",
".",
"_string_from_ip_int",
"(",
"self",
".",
"_ip_int_from_prefix",
"(",
"pref... | Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string. | [
"Turn",
"a",
"prefix",
"length",
"into",
"a",
"dotted",
"decimal",
"string",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/ipaddress.py#L557-L569 | train | 220,911 |
RedHatInsights/insights-core | insights/core/context.py | ExecutionContext.check_output | def check_output(self, cmd, timeout=None, keep_rc=False, env=None):
""" Subclasses can override to provide special
environment setup, command prefixes, etc.
"""
return subproc.call(cmd, timeout=timeout or self.timeout,
keep_rc=keep_rc, env=env) | python | def check_output(self, cmd, timeout=None, keep_rc=False, env=None):
""" Subclasses can override to provide special
environment setup, command prefixes, etc.
"""
return subproc.call(cmd, timeout=timeout or self.timeout,
keep_rc=keep_rc, env=env) | [
"def",
"check_output",
"(",
"self",
",",
"cmd",
",",
"timeout",
"=",
"None",
",",
"keep_rc",
"=",
"False",
",",
"env",
"=",
"None",
")",
":",
"return",
"subproc",
".",
"call",
"(",
"cmd",
",",
"timeout",
"=",
"timeout",
"or",
"self",
".",
"timeout",
... | Subclasses can override to provide special
environment setup, command prefixes, etc. | [
"Subclasses",
"can",
"override",
"to",
"provide",
"special",
"environment",
"setup",
"command",
"prefixes",
"etc",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/context.py#L132-L137 | train | 220,912 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.create_archive_dir | def create_archive_dir(self):
"""
Create the archive dir
"""
archive_dir = os.path.join(self.tmp_dir, self.archive_name)
os.makedirs(archive_dir, 0o700)
return archive_dir | python | def create_archive_dir(self):
"""
Create the archive dir
"""
archive_dir = os.path.join(self.tmp_dir, self.archive_name)
os.makedirs(archive_dir, 0o700)
return archive_dir | [
"def",
"create_archive_dir",
"(",
"self",
")",
":",
"archive_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"tmp_dir",
",",
"self",
".",
"archive_name",
")",
"os",
".",
"makedirs",
"(",
"archive_dir",
",",
"0o700",
")",
"return",
"archive_d... | Create the archive dir | [
"Create",
"the",
"archive",
"dir"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L41-L47 | train | 220,913 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.create_command_dir | def create_command_dir(self):
"""
Create the "sos_commands" dir
"""
cmd_dir = os.path.join(self.archive_dir, "insights_commands")
os.makedirs(cmd_dir, 0o700)
return cmd_dir | python | def create_command_dir(self):
"""
Create the "sos_commands" dir
"""
cmd_dir = os.path.join(self.archive_dir, "insights_commands")
os.makedirs(cmd_dir, 0o700)
return cmd_dir | [
"def",
"create_command_dir",
"(",
"self",
")",
":",
"cmd_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"archive_dir",
",",
"\"insights_commands\"",
")",
"os",
".",
"makedirs",
"(",
"cmd_dir",
",",
"0o700",
")",
"return",
"cmd_dir"
] | Create the "sos_commands" dir | [
"Create",
"the",
"sos_commands",
"dir"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L49-L55 | train | 220,914 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.get_full_archive_path | def get_full_archive_path(self, path):
"""
Returns the full archive path
"""
return os.path.join(self.archive_dir, path.lstrip('/')) | python | def get_full_archive_path(self, path):
"""
Returns the full archive path
"""
return os.path.join(self.archive_dir, path.lstrip('/')) | [
"def",
"get_full_archive_path",
"(",
"self",
",",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"archive_dir",
",",
"path",
".",
"lstrip",
"(",
"'/'",
")",
")"
] | Returns the full archive path | [
"Returns",
"the",
"full",
"archive",
"path"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L57-L61 | train | 220,915 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive._copy_file | def _copy_file(self, path):
"""
Copy just a single file
"""
full_path = self.get_full_archive_path(path)
# Try to make the dir, eat exception if it fails
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
logger.debug("Copying %s to %s", path, full_path)
shutil.copyfile(path, full_path)
return path | python | def _copy_file(self, path):
"""
Copy just a single file
"""
full_path = self.get_full_archive_path(path)
# Try to make the dir, eat exception if it fails
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
logger.debug("Copying %s to %s", path, full_path)
shutil.copyfile(path, full_path)
return path | [
"def",
"_copy_file",
"(",
"self",
",",
"path",
")",
":",
"full_path",
"=",
"self",
".",
"get_full_archive_path",
"(",
"path",
")",
"# Try to make the dir, eat exception if it fails",
"try",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(... | Copy just a single file | [
"Copy",
"just",
"a",
"single",
"file"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L63-L75 | train | 220,916 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.copy_file | def copy_file(self, path):
"""
Copy a single file or regex, creating the necessary directories
"""
if "*" in path:
paths = _expand_paths(path)
if paths:
for path in paths:
self._copy_file(path)
else:
if os.path.isfile(path):
return self._copy_file(path)
else:
logger.debug("File %s does not exist", path)
return False | python | def copy_file(self, path):
"""
Copy a single file or regex, creating the necessary directories
"""
if "*" in path:
paths = _expand_paths(path)
if paths:
for path in paths:
self._copy_file(path)
else:
if os.path.isfile(path):
return self._copy_file(path)
else:
logger.debug("File %s does not exist", path)
return False | [
"def",
"copy_file",
"(",
"self",
",",
"path",
")",
":",
"if",
"\"*\"",
"in",
"path",
":",
"paths",
"=",
"_expand_paths",
"(",
"path",
")",
"if",
"paths",
":",
"for",
"path",
"in",
"paths",
":",
"self",
".",
"_copy_file",
"(",
"path",
")",
"else",
"... | Copy a single file or regex, creating the necessary directories | [
"Copy",
"a",
"single",
"file",
"or",
"regex",
"creating",
"the",
"necessary",
"directories"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L77-L91 | train | 220,917 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.copy_dir | def copy_dir(self, path):
"""
Recursively copy directory
"""
for directory in path:
if os.path.isdir(path):
full_path = os.path.join(self.archive_dir, directory.lstrip('/'))
logger.debug("Copying %s to %s", directory, full_path)
shutil.copytree(directory, full_path)
else:
logger.debug("Not a directory: %s", directory)
return path | python | def copy_dir(self, path):
"""
Recursively copy directory
"""
for directory in path:
if os.path.isdir(path):
full_path = os.path.join(self.archive_dir, directory.lstrip('/'))
logger.debug("Copying %s to %s", directory, full_path)
shutil.copytree(directory, full_path)
else:
logger.debug("Not a directory: %s", directory)
return path | [
"def",
"copy_dir",
"(",
"self",
",",
"path",
")",
":",
"for",
"directory",
"in",
"path",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"archive_dir",
",",
"d... | Recursively copy directory | [
"Recursively",
"copy",
"directory"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L93-L104 | train | 220,918 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.create_tar_file | def create_tar_file(self, full_archive=False):
"""
Create tar file to be compressed
"""
tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
ext = "" if self.compressor == "none" else ".%s" % self.compressor
tar_file_name = tar_file_name + ".tar" + ext
logger.debug("Tar File: " + tar_file_name)
subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
self.get_compression_flag(self.compressor),
tar_file_name,
# for the docker "uber archive,"use archive_dir
# rather than tmp_dir for all the files we tar,
# because all the individual archives are in there
self.tmp_dir if not full_archive else self.archive_dir)),
stderr=subprocess.PIPE)
self.delete_archive_dir()
logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
return tar_file_name | python | def create_tar_file(self, full_archive=False):
"""
Create tar file to be compressed
"""
tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
ext = "" if self.compressor == "none" else ".%s" % self.compressor
tar_file_name = tar_file_name + ".tar" + ext
logger.debug("Tar File: " + tar_file_name)
subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
self.get_compression_flag(self.compressor),
tar_file_name,
# for the docker "uber archive,"use archive_dir
# rather than tmp_dir for all the files we tar,
# because all the individual archives are in there
self.tmp_dir if not full_archive else self.archive_dir)),
stderr=subprocess.PIPE)
self.delete_archive_dir()
logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
return tar_file_name | [
"def",
"create_tar_file",
"(",
"self",
",",
"full_archive",
"=",
"False",
")",
":",
"tar_file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"archive_tmp_dir",
",",
"self",
".",
"archive_name",
")",
"ext",
"=",
"\"\"",
"if",
"self",
".",
... | Create tar file to be compressed | [
"Create",
"tar",
"file",
"to",
"be",
"compressed"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L114-L132 | train | 220,919 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.delete_tmp_dir | def delete_tmp_dir(self):
"""
Delete the entire tmp dir
"""
logger.debug("Deleting: " + self.tmp_dir)
shutil.rmtree(self.tmp_dir, True) | python | def delete_tmp_dir(self):
"""
Delete the entire tmp dir
"""
logger.debug("Deleting: " + self.tmp_dir)
shutil.rmtree(self.tmp_dir, True) | [
"def",
"delete_tmp_dir",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Deleting: \"",
"+",
"self",
".",
"tmp_dir",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"tmp_dir",
",",
"True",
")"
] | Delete the entire tmp dir | [
"Delete",
"the",
"entire",
"tmp",
"dir"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L134-L139 | train | 220,920 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.delete_archive_dir | def delete_archive_dir(self):
"""
Delete the entire archive dir
"""
logger.debug("Deleting: " + self.archive_dir)
shutil.rmtree(self.archive_dir, True) | python | def delete_archive_dir(self):
"""
Delete the entire archive dir
"""
logger.debug("Deleting: " + self.archive_dir)
shutil.rmtree(self.archive_dir, True) | [
"def",
"delete_archive_dir",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Deleting: \"",
"+",
"self",
".",
"archive_dir",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"archive_dir",
",",
"True",
")"
] | Delete the entire archive dir | [
"Delete",
"the",
"entire",
"archive",
"dir"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L141-L146 | train | 220,921 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.delete_archive_file | def delete_archive_file(self):
"""
Delete the directory containing the constructed archive
"""
logger.debug("Deleting %s", self.archive_tmp_dir)
shutil.rmtree(self.archive_tmp_dir, True) | python | def delete_archive_file(self):
"""
Delete the directory containing the constructed archive
"""
logger.debug("Deleting %s", self.archive_tmp_dir)
shutil.rmtree(self.archive_tmp_dir, True) | [
"def",
"delete_archive_file",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Deleting %s\"",
",",
"self",
".",
"archive_tmp_dir",
")",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"archive_tmp_dir",
",",
"True",
")"
] | Delete the directory containing the constructed archive | [
"Delete",
"the",
"directory",
"containing",
"the",
"constructed",
"archive"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L148-L153 | train | 220,922 |
RedHatInsights/insights-core | insights/client/archive.py | InsightsArchive.add_metadata_to_archive | def add_metadata_to_archive(self, metadata, meta_path):
'''
Add metadata to archive
'''
archive_path = self.get_full_archive_path(meta_path.lstrip('/'))
write_data_to_file(metadata, archive_path) | python | def add_metadata_to_archive(self, metadata, meta_path):
'''
Add metadata to archive
'''
archive_path = self.get_full_archive_path(meta_path.lstrip('/'))
write_data_to_file(metadata, archive_path) | [
"def",
"add_metadata_to_archive",
"(",
"self",
",",
"metadata",
",",
"meta_path",
")",
":",
"archive_path",
"=",
"self",
".",
"get_full_archive_path",
"(",
"meta_path",
".",
"lstrip",
"(",
"'/'",
")",
")",
"write_data_to_file",
"(",
"metadata",
",",
"archive_pat... | Add metadata to archive | [
"Add",
"metadata",
"to",
"archive"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/archive.py#L168-L173 | train | 220,923 |
RedHatInsights/insights-core | insights/core/remote_resource.py | RemoteResource.get | def get(self, url, params={}, headers={}, auth=(), certificate_path=None):
"""
Returns the response payload from the request to the given URL.
Args:
url (str): The URL for the WEB API that the request is being made too.
params (dict): Dictionary containing the query string parameters.
headers (dict): HTTP Headers that may be needed for the request.
auth (tuple): User ID and password for Basic Auth
certificate_path (str): Path to the ssl certificate.
Returns:
response: (HttpResponse): Response object from requests.get api request
"""
certificate_path = certificate_path if certificate_path else False
return self.session.get(url, params=params, headers=headers, verify=certificate_path, auth=auth,
timeout=self.timeout) | python | def get(self, url, params={}, headers={}, auth=(), certificate_path=None):
"""
Returns the response payload from the request to the given URL.
Args:
url (str): The URL for the WEB API that the request is being made too.
params (dict): Dictionary containing the query string parameters.
headers (dict): HTTP Headers that may be needed for the request.
auth (tuple): User ID and password for Basic Auth
certificate_path (str): Path to the ssl certificate.
Returns:
response: (HttpResponse): Response object from requests.get api request
"""
certificate_path = certificate_path if certificate_path else False
return self.session.get(url, params=params, headers=headers, verify=certificate_path, auth=auth,
timeout=self.timeout) | [
"def",
"get",
"(",
"self",
",",
"url",
",",
"params",
"=",
"{",
"}",
",",
"headers",
"=",
"{",
"}",
",",
"auth",
"=",
"(",
")",
",",
"certificate_path",
"=",
"None",
")",
":",
"certificate_path",
"=",
"certificate_path",
"if",
"certificate_path",
"else... | Returns the response payload from the request to the given URL.
Args:
url (str): The URL for the WEB API that the request is being made too.
params (dict): Dictionary containing the query string parameters.
headers (dict): HTTP Headers that may be needed for the request.
auth (tuple): User ID and password for Basic Auth
certificate_path (str): Path to the ssl certificate.
Returns:
response: (HttpResponse): Response object from requests.get api request | [
"Returns",
"the",
"response",
"payload",
"from",
"the",
"request",
"to",
"the",
"given",
"URL",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/remote_resource.py#L34-L51 | train | 220,924 |
RedHatInsights/insights-core | insights/core/remote_resource.py | DefaultHeuristic.update_headers | def update_headers(self, response):
"""
Returns the updated caching headers.
Args:
response (HttpResponse): The response from the remote service
Returns:
response:(HttpResponse.Headers): Http caching headers
"""
if 'expires' in response.headers and 'cache-control' in response.headers:
self.msg = self.server_cache_headers
return response.headers
else:
self.msg = self.default_cache_vars
date = parsedate(response.headers['date'])
expires = datetime(*date[:6]) + timedelta(0, self.expire_after)
response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())),
'cache-control': 'public'})
return response.headers | python | def update_headers(self, response):
"""
Returns the updated caching headers.
Args:
response (HttpResponse): The response from the remote service
Returns:
response:(HttpResponse.Headers): Http caching headers
"""
if 'expires' in response.headers and 'cache-control' in response.headers:
self.msg = self.server_cache_headers
return response.headers
else:
self.msg = self.default_cache_vars
date = parsedate(response.headers['date'])
expires = datetime(*date[:6]) + timedelta(0, self.expire_after)
response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())),
'cache-control': 'public'})
return response.headers | [
"def",
"update_headers",
"(",
"self",
",",
"response",
")",
":",
"if",
"'expires'",
"in",
"response",
".",
"headers",
"and",
"'cache-control'",
"in",
"response",
".",
"headers",
":",
"self",
".",
"msg",
"=",
"self",
".",
"server_cache_headers",
"return",
"re... | Returns the updated caching headers.
Args:
response (HttpResponse): The response from the remote service
Returns:
response:(HttpResponse.Headers): Http caching headers | [
"Returns",
"the",
"updated",
"caching",
"headers",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/remote_resource.py#L119-L138 | train | 220,925 |
RedHatInsights/insights-core | insights/combiners/hostname.py | hostname | def hostname(hn, ft, si):
"""Check hostname, facter and systemid to get the fqdn, hostname and domain.
Prefer hostname to facter and systemid.
Returns:
insights.combiners.hostname.Hostname: A named tuple with `fqdn`,
`hostname` and `domain` components.
Raises:
Exception: If no hostname can be found in any of the three parsers.
"""
if not hn or not hn.fqdn:
hn = ft
if hn and hn.fqdn:
fqdn = hn.fqdn
hostname = hn.hostname if hn.hostname else fqdn.split(".")[0]
domain = hn.domain if hn.domain else ".".join(fqdn.split(".")[1:])
return Hostname(fqdn, hostname, domain)
else:
fqdn = si.get("profile_name") if si else None
if fqdn:
hostname = fqdn.split(".")[0]
domain = ".".join(fqdn.split(".")[1:])
return Hostname(fqdn, hostname, domain)
raise Exception("Unable to get hostname.") | python | def hostname(hn, ft, si):
"""Check hostname, facter and systemid to get the fqdn, hostname and domain.
Prefer hostname to facter and systemid.
Returns:
insights.combiners.hostname.Hostname: A named tuple with `fqdn`,
`hostname` and `domain` components.
Raises:
Exception: If no hostname can be found in any of the three parsers.
"""
if not hn or not hn.fqdn:
hn = ft
if hn and hn.fqdn:
fqdn = hn.fqdn
hostname = hn.hostname if hn.hostname else fqdn.split(".")[0]
domain = hn.domain if hn.domain else ".".join(fqdn.split(".")[1:])
return Hostname(fqdn, hostname, domain)
else:
fqdn = si.get("profile_name") if si else None
if fqdn:
hostname = fqdn.split(".")[0]
domain = ".".join(fqdn.split(".")[1:])
return Hostname(fqdn, hostname, domain)
raise Exception("Unable to get hostname.") | [
"def",
"hostname",
"(",
"hn",
",",
"ft",
",",
"si",
")",
":",
"if",
"not",
"hn",
"or",
"not",
"hn",
".",
"fqdn",
":",
"hn",
"=",
"ft",
"if",
"hn",
"and",
"hn",
".",
"fqdn",
":",
"fqdn",
"=",
"hn",
".",
"fqdn",
"hostname",
"=",
"hn",
".",
"h... | Check hostname, facter and systemid to get the fqdn, hostname and domain.
Prefer hostname to facter and systemid.
Returns:
insights.combiners.hostname.Hostname: A named tuple with `fqdn`,
`hostname` and `domain` components.
Raises:
Exception: If no hostname can be found in any of the three parsers. | [
"Check",
"hostname",
"facter",
"and",
"systemid",
"to",
"get",
"the",
"fqdn",
"hostname",
"and",
"domain",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/combiners/hostname.py#L44-L72 | train | 220,926 |
RedHatInsights/insights-core | insights/configtree/iniconfig.py | parse_doc | def parse_doc(f, ctx=None, overwrite=False):
""" Accepts an open file or a list of lines. """
lg = LineGetter(f, comment_marker=("#", ";"), strip=False)
cfg = ConfigParser(ctx).parse_doc(lg)
set_defaults(cfg)
if overwrite:
squash(cfg)
return cfg | python | def parse_doc(f, ctx=None, overwrite=False):
""" Accepts an open file or a list of lines. """
lg = LineGetter(f, comment_marker=("#", ";"), strip=False)
cfg = ConfigParser(ctx).parse_doc(lg)
set_defaults(cfg)
if overwrite:
squash(cfg)
return cfg | [
"def",
"parse_doc",
"(",
"f",
",",
"ctx",
"=",
"None",
",",
"overwrite",
"=",
"False",
")",
":",
"lg",
"=",
"LineGetter",
"(",
"f",
",",
"comment_marker",
"=",
"(",
"\"#\"",
",",
"\";\"",
")",
",",
"strip",
"=",
"False",
")",
"cfg",
"=",
"ConfigPar... | Accepts an open file or a list of lines. | [
"Accepts",
"an",
"open",
"file",
"or",
"a",
"list",
"of",
"lines",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/configtree/iniconfig.py#L82-L89 | train | 220,927 |
RedHatInsights/insights-core | insights/parsers/uname.py | pad_release | def pad_release(release_to_pad, num_sections=4):
'''
Pad out package and kernel release versions so that
``LooseVersion`` comparisons will be correct.
Release versions with less than num_sections will
be padded in front of the last section with zeros.
For example ::
pad_release("390.el6", 4)
will return ``390.0.0.el6`` and ::
pad_release("390.11.el6", 4)
will return ``390.11.0.el6``.
If the number of sections of the release to be padded is
greater than num_sections, a ``ValueError`` will be raised.
'''
parts = release_to_pad.split('.')
if len(parts) > num_sections:
raise ValueError("Too many sections encountered ({found} > {num} in release string {rel}".format(
found=len(parts), num=num_sections, rel=release_to_pad
))
pad_count = num_sections - len(parts)
return ".".join(parts[:-1] + ['0'] * pad_count + parts[-1:]) | python | def pad_release(release_to_pad, num_sections=4):
'''
Pad out package and kernel release versions so that
``LooseVersion`` comparisons will be correct.
Release versions with less than num_sections will
be padded in front of the last section with zeros.
For example ::
pad_release("390.el6", 4)
will return ``390.0.0.el6`` and ::
pad_release("390.11.el6", 4)
will return ``390.11.0.el6``.
If the number of sections of the release to be padded is
greater than num_sections, a ``ValueError`` will be raised.
'''
parts = release_to_pad.split('.')
if len(parts) > num_sections:
raise ValueError("Too many sections encountered ({found} > {num} in release string {rel}".format(
found=len(parts), num=num_sections, rel=release_to_pad
))
pad_count = num_sections - len(parts)
return ".".join(parts[:-1] + ['0'] * pad_count + parts[-1:]) | [
"def",
"pad_release",
"(",
"release_to_pad",
",",
"num_sections",
"=",
"4",
")",
":",
"parts",
"=",
"release_to_pad",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
">",
"num_sections",
":",
"raise",
"ValueError",
"(",
"\"Too many sections en... | Pad out package and kernel release versions so that
``LooseVersion`` comparisons will be correct.
Release versions with less than num_sections will
be padded in front of the last section with zeros.
For example ::
pad_release("390.el6", 4)
will return ``390.0.0.el6`` and ::
pad_release("390.11.el6", 4)
will return ``390.11.0.el6``.
If the number of sections of the release to be padded is
greater than num_sections, a ``ValueError`` will be raised. | [
"Pad",
"out",
"package",
"and",
"kernel",
"release",
"versions",
"so",
"that",
"LooseVersion",
"comparisons",
"will",
"be",
"correct",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/uname.py#L537-L566 | train | 220,928 |
RedHatInsights/insights-core | insights/parsers/alternatives.py | AlternativesOutput.parse_content | def parse_content(self, content):
"""
Parse the output of the ``alternatives`` command.
"""
self.program = None
self.status = None
self.link = None
self.best = None
self.paths = []
current_path = None
# Set up instance variable
for line in content:
words = line.split(None)
if ' - status is' in line:
# alternatives only displays one program, so finding
# this line again is an error.
if self.program:
raise ParseException(
"Program line for {newprog} found in output for {oldprog}".format(
newprog=words[0], oldprog=self.program
)
)
# Set up new program data
self.program = words[0]
self.status = words[4][:-1] # remove trailing .
self.alternatives = []
current_path = {}
elif not self.program:
# Lines before 'status is' line are ignored
continue
elif line.startswith(' link currently points to ') and len(words) == 5:
# line: ' link currently points to /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
self.link = words[4]
elif ' - priority ' in line and len(words) == 4 and words[3].isdigit():
# line: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091
# New path - save current path if set
self.paths.append({
'path': words[0],
'priority': int(words[3]),
'slave': {},
})
current_path = self.paths[-1]
elif line.startswith(' slave ') and len(words) == 3 and current_path:
# line: ' slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
current_path['slave'][words[1][:-1]] = words[2] # remove final : from program
elif line.startswith("Current `best' version is ") and len(words) == 5:
# line: 'Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.'
self.best = words[4][:-1] | python | def parse_content(self, content):
"""
Parse the output of the ``alternatives`` command.
"""
self.program = None
self.status = None
self.link = None
self.best = None
self.paths = []
current_path = None
# Set up instance variable
for line in content:
words = line.split(None)
if ' - status is' in line:
# alternatives only displays one program, so finding
# this line again is an error.
if self.program:
raise ParseException(
"Program line for {newprog} found in output for {oldprog}".format(
newprog=words[0], oldprog=self.program
)
)
# Set up new program data
self.program = words[0]
self.status = words[4][:-1] # remove trailing .
self.alternatives = []
current_path = {}
elif not self.program:
# Lines before 'status is' line are ignored
continue
elif line.startswith(' link currently points to ') and len(words) == 5:
# line: ' link currently points to /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
self.link = words[4]
elif ' - priority ' in line and len(words) == 4 and words[3].isdigit():
# line: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091
# New path - save current path if set
self.paths.append({
'path': words[0],
'priority': int(words[3]),
'slave': {},
})
current_path = self.paths[-1]
elif line.startswith(' slave ') and len(words) == 3 and current_path:
# line: ' slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
current_path['slave'][words[1][:-1]] = words[2] # remove final : from program
elif line.startswith("Current `best' version is ") and len(words) == 5:
# line: 'Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.'
self.best = words[4][:-1] | [
"def",
"parse_content",
"(",
"self",
",",
"content",
")",
":",
"self",
".",
"program",
"=",
"None",
"self",
".",
"status",
"=",
"None",
"self",
".",
"link",
"=",
"None",
"self",
".",
"best",
"=",
"None",
"self",
".",
"paths",
"=",
"[",
"]",
"curren... | Parse the output of the ``alternatives`` command. | [
"Parse",
"the",
"output",
"of",
"the",
"alternatives",
"command",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/alternatives.py#L79-L127 | train | 220,929 |
RedHatInsights/insights-core | insights/core/__init__.py | CommandParser.validate_lines | def validate_lines(results, bad_lines):
"""
If `results` contains a single line and that line is included
in the `bad_lines` list, this function returns `False`. If no bad
line is found the function returns `True`
Parameters:
results(str): The results string of the output from the command
defined by the command spec.
Returns:
(Boolean): True for no bad lines or False for bad line found.
"""
if results and len(results) == 1:
first = results[0]
if any(l in first.lower() for l in bad_lines):
return False
return True | python | def validate_lines(results, bad_lines):
"""
If `results` contains a single line and that line is included
in the `bad_lines` list, this function returns `False`. If no bad
line is found the function returns `True`
Parameters:
results(str): The results string of the output from the command
defined by the command spec.
Returns:
(Boolean): True for no bad lines or False for bad line found.
"""
if results and len(results) == 1:
first = results[0]
if any(l in first.lower() for l in bad_lines):
return False
return True | [
"def",
"validate_lines",
"(",
"results",
",",
"bad_lines",
")",
":",
"if",
"results",
"and",
"len",
"(",
"results",
")",
"==",
"1",
":",
"first",
"=",
"results",
"[",
"0",
"]",
"if",
"any",
"(",
"l",
"in",
"first",
".",
"lower",
"(",
")",
"for",
... | If `results` contains a single line and that line is included
in the `bad_lines` list, this function returns `False`. If no bad
line is found the function returns `True`
Parameters:
results(str): The results string of the output from the command
defined by the command spec.
Returns:
(Boolean): True for no bad lines or False for bad line found. | [
"If",
"results",
"contains",
"a",
"single",
"line",
"and",
"that",
"line",
"is",
"included",
"in",
"the",
"bad_lines",
"list",
"this",
"function",
"returns",
"False",
".",
"If",
"no",
"bad",
"line",
"is",
"found",
"the",
"function",
"returns",
"True"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L528-L546 | train | 220,930 |
RedHatInsights/insights-core | insights/core/__init__.py | Scannable._scan | def _scan(cls, result_key, scanner):
"""
Registers a `scanner` which is a function that will be called once per
logical line in a document. A scanners job is to evaluate the content
of the line and set a so-called `result_key` on the class to be
retrieved later by a rule.
"""
if result_key in cls.scanner_keys:
raise ValueError("'%s' is already a registered scanner key" % result_key)
cls.scanners.append(scanner)
cls.scanner_keys.add(result_key) | python | def _scan(cls, result_key, scanner):
"""
Registers a `scanner` which is a function that will be called once per
logical line in a document. A scanners job is to evaluate the content
of the line and set a so-called `result_key` on the class to be
retrieved later by a rule.
"""
if result_key in cls.scanner_keys:
raise ValueError("'%s' is already a registered scanner key" % result_key)
cls.scanners.append(scanner)
cls.scanner_keys.add(result_key) | [
"def",
"_scan",
"(",
"cls",
",",
"result_key",
",",
"scanner",
")",
":",
"if",
"result_key",
"in",
"cls",
".",
"scanner_keys",
":",
"raise",
"ValueError",
"(",
"\"'%s' is already a registered scanner key\"",
"%",
"result_key",
")",
"cls",
".",
"scanners",
".",
... | Registers a `scanner` which is a function that will be called once per
logical line in a document. A scanners job is to evaluate the content
of the line and set a so-called `result_key` on the class to be
retrieved later by a rule. | [
"Registers",
"a",
"scanner",
"which",
"is",
"a",
"function",
"that",
"will",
"be",
"called",
"once",
"per",
"logical",
"line",
"in",
"a",
"document",
".",
"A",
"scanners",
"job",
"is",
"to",
"evaluate",
"the",
"content",
"of",
"the",
"line",
"and",
"set"... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L790-L802 | train | 220,931 |
RedHatInsights/insights-core | insights/core/__init__.py | Scannable.any | def any(cls, result_key, func):
"""
Sets the `result_key` to the output of `func` if `func` ever returns
truthy
"""
def scanner(self, obj):
current_value = getattr(self, result_key, None)
setattr(self, result_key, current_value or func(obj))
cls._scan(result_key, scanner) | python | def any(cls, result_key, func):
"""
Sets the `result_key` to the output of `func` if `func` ever returns
truthy
"""
def scanner(self, obj):
current_value = getattr(self, result_key, None)
setattr(self, result_key, current_value or func(obj))
cls._scan(result_key, scanner) | [
"def",
"any",
"(",
"cls",
",",
"result_key",
",",
"func",
")",
":",
"def",
"scanner",
"(",
"self",
",",
"obj",
")",
":",
"current_value",
"=",
"getattr",
"(",
"self",
",",
"result_key",
",",
"None",
")",
"setattr",
"(",
"self",
",",
"result_key",
","... | Sets the `result_key` to the output of `func` if `func` ever returns
truthy | [
"Sets",
"the",
"result_key",
"to",
"the",
"output",
"of",
"func",
"if",
"func",
"ever",
"returns",
"truthy"
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L805-L814 | train | 220,932 |
RedHatInsights/insights-core | insights/core/__init__.py | LogFileOutput.parse_content | def parse_content(self, content):
"""
Use all the defined scanners to search the log file, setting the
properties defined in the scanner.
"""
self.lines = content
for scanner in self.scanners:
scanner(self) | python | def parse_content(self, content):
"""
Use all the defined scanners to search the log file, setting the
properties defined in the scanner.
"""
self.lines = content
for scanner in self.scanners:
scanner(self) | [
"def",
"parse_content",
"(",
"self",
",",
"content",
")",
":",
"self",
".",
"lines",
"=",
"content",
"for",
"scanner",
"in",
"self",
".",
"scanners",
":",
"scanner",
"(",
"self",
")"
] | Use all the defined scanners to search the log file, setting the
properties defined in the scanner. | [
"Use",
"all",
"the",
"defined",
"scanners",
"to",
"search",
"the",
"log",
"file",
"setting",
"the",
"properties",
"defined",
"in",
"the",
"scanner",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L902-L909 | train | 220,933 |
RedHatInsights/insights-core | insights/core/__init__.py | LogFileOutput._valid_search | def _valid_search(self, s):
"""
Check this given `s`, it must be a string or a list of strings.
Otherwise, a TypeError will be raised.
"""
if isinstance(s, six.string_types):
return lambda l: s in l
elif (isinstance(s, list) and len(s) > 0 and
all(isinstance(w, six.string_types) for w in s)):
return lambda l: all(w in l for w in s)
elif s is not None:
raise TypeError('Search items must be given as a string or a list of strings') | python | def _valid_search(self, s):
"""
Check this given `s`, it must be a string or a list of strings.
Otherwise, a TypeError will be raised.
"""
if isinstance(s, six.string_types):
return lambda l: s in l
elif (isinstance(s, list) and len(s) > 0 and
all(isinstance(w, six.string_types) for w in s)):
return lambda l: all(w in l for w in s)
elif s is not None:
raise TypeError('Search items must be given as a string or a list of strings') | [
"def",
"_valid_search",
"(",
"self",
",",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")",
":",
"return",
"lambda",
"l",
":",
"s",
"in",
"l",
"elif",
"(",
"isinstance",
"(",
"s",
",",
"list",
")",
"and",
"len",
"... | Check this given `s`, it must be a string or a list of strings.
Otherwise, a TypeError will be raised. | [
"Check",
"this",
"given",
"s",
"it",
"must",
"be",
"a",
"string",
"or",
"a",
"list",
"of",
"strings",
".",
"Otherwise",
"a",
"TypeError",
"will",
"be",
"raised",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L925-L936 | train | 220,934 |
RedHatInsights/insights-core | insights/core/__init__.py | LogFileOutput.get | def get(self, s):
"""
Returns all lines that contain `s` anywhere and wrap them in a list of
dictionaries. `s` can be either a single string or a string list. For
list, all keywords in the list must be found in each line.
Parameters:
s(str or list): one or more strings to search for.
Returns:
(list): list of dictionaries corresponding to the parsed lines
contain the `s`.
"""
ret = []
search_by_expression = self._valid_search(s)
for l in self.lines:
if search_by_expression(l):
ret.append(self._parse_line(l))
return ret | python | def get(self, s):
"""
Returns all lines that contain `s` anywhere and wrap them in a list of
dictionaries. `s` can be either a single string or a string list. For
list, all keywords in the list must be found in each line.
Parameters:
s(str or list): one or more strings to search for.
Returns:
(list): list of dictionaries corresponding to the parsed lines
contain the `s`.
"""
ret = []
search_by_expression = self._valid_search(s)
for l in self.lines:
if search_by_expression(l):
ret.append(self._parse_line(l))
return ret | [
"def",
"get",
"(",
"self",
",",
"s",
")",
":",
"ret",
"=",
"[",
"]",
"search_by_expression",
"=",
"self",
".",
"_valid_search",
"(",
"s",
")",
"for",
"l",
"in",
"self",
".",
"lines",
":",
"if",
"search_by_expression",
"(",
"l",
")",
":",
"ret",
"."... | Returns all lines that contain `s` anywhere and wrap them in a list of
dictionaries. `s` can be either a single string or a string list. For
list, all keywords in the list must be found in each line.
Parameters:
s(str or list): one or more strings to search for.
Returns:
(list): list of dictionaries corresponding to the parsed lines
contain the `s`. | [
"Returns",
"all",
"lines",
"that",
"contain",
"s",
"anywhere",
"and",
"wrap",
"them",
"in",
"a",
"list",
"of",
"dictionaries",
".",
"s",
"can",
"be",
"either",
"a",
"single",
"string",
"or",
"a",
"string",
"list",
".",
"For",
"list",
"all",
"keywords",
... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L938-L956 | train | 220,935 |
RedHatInsights/insights-core | insights/core/__init__.py | LogFileOutput.scan | def scan(cls, result_key, func):
"""
Define computed fields based on a string to "grep for". This is
preferred to utilizing raw log lines in plugins because computed fields
will be serialized, whereas raw log lines will not.
"""
if result_key in cls.scanner_keys:
raise ValueError("'%s' is already a registered scanner key" % result_key)
def scanner(self):
result = func(self)
setattr(self, result_key, result)
cls.scanners.append(scanner)
cls.scanner_keys.add(result_key) | python | def scan(cls, result_key, func):
"""
Define computed fields based on a string to "grep for". This is
preferred to utilizing raw log lines in plugins because computed fields
will be serialized, whereas raw log lines will not.
"""
if result_key in cls.scanner_keys:
raise ValueError("'%s' is already a registered scanner key" % result_key)
def scanner(self):
result = func(self)
setattr(self, result_key, result)
cls.scanners.append(scanner)
cls.scanner_keys.add(result_key) | [
"def",
"scan",
"(",
"cls",
",",
"result_key",
",",
"func",
")",
":",
"if",
"result_key",
"in",
"cls",
".",
"scanner_keys",
":",
"raise",
"ValueError",
"(",
"\"'%s' is already a registered scanner key\"",
"%",
"result_key",
")",
"def",
"scanner",
"(",
"self",
"... | Define computed fields based on a string to "grep for". This is
preferred to utilizing raw log lines in plugins because computed fields
will be serialized, whereas raw log lines will not. | [
"Define",
"computed",
"fields",
"based",
"on",
"a",
"string",
"to",
"grep",
"for",
".",
"This",
"is",
"preferred",
"to",
"utilizing",
"raw",
"log",
"lines",
"in",
"plugins",
"because",
"computed",
"fields",
"will",
"be",
"serialized",
"whereas",
"raw",
"log"... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L959-L974 | train | 220,936 |
RedHatInsights/insights-core | insights/core/__init__.py | LogFileOutput.keep_scan | def keep_scan(cls, result_key, token):
"""
Define a property that is set to the list of lines that contain the
given token. Uses the get method of the log file.
"""
def _scan(self):
return self.get(token)
cls.scan(result_key, _scan) | python | def keep_scan(cls, result_key, token):
"""
Define a property that is set to the list of lines that contain the
given token. Uses the get method of the log file.
"""
def _scan(self):
return self.get(token)
cls.scan(result_key, _scan) | [
"def",
"keep_scan",
"(",
"cls",
",",
"result_key",
",",
"token",
")",
":",
"def",
"_scan",
"(",
"self",
")",
":",
"return",
"self",
".",
"get",
"(",
"token",
")",
"cls",
".",
"scan",
"(",
"result_key",
",",
"_scan",
")"
] | Define a property that is set to the list of lines that contain the
given token. Uses the get method of the log file. | [
"Define",
"a",
"property",
"that",
"is",
"set",
"to",
"the",
"list",
"of",
"lines",
"that",
"contain",
"the",
"given",
"token",
".",
"Uses",
"the",
"get",
"method",
"of",
"the",
"log",
"file",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L988-L996 | train | 220,937 |
RedHatInsights/insights-core | insights/core/__init__.py | IniConfigFile.parse_content | def parse_content(self, content, allow_no_value=False):
"""Parses content of the config file.
In child class overload and call super to set flag
``allow_no_values`` and allow keys with no value in
config file::
def parse_content(self, content):
super(YourClass, self).parse_content(content,
allow_no_values=True)
"""
super(IniConfigFile, self).parse_content(content)
config = RawConfigParser(allow_no_value=allow_no_value)
fp = io.StringIO(u"\n".join(content))
config.readfp(fp, filename=self.file_name)
self.data = config | python | def parse_content(self, content, allow_no_value=False):
"""Parses content of the config file.
In child class overload and call super to set flag
``allow_no_values`` and allow keys with no value in
config file::
def parse_content(self, content):
super(YourClass, self).parse_content(content,
allow_no_values=True)
"""
super(IniConfigFile, self).parse_content(content)
config = RawConfigParser(allow_no_value=allow_no_value)
fp = io.StringIO(u"\n".join(content))
config.readfp(fp, filename=self.file_name)
self.data = config | [
"def",
"parse_content",
"(",
"self",
",",
"content",
",",
"allow_no_value",
"=",
"False",
")",
":",
"super",
"(",
"IniConfigFile",
",",
"self",
")",
".",
"parse_content",
"(",
"content",
")",
"config",
"=",
"RawConfigParser",
"(",
"allow_no_value",
"=",
"all... | Parses content of the config file.
In child class overload and call super to set flag
``allow_no_values`` and allow keys with no value in
config file::
def parse_content(self, content):
super(YourClass, self).parse_content(content,
allow_no_values=True) | [
"Parses",
"content",
"of",
"the",
"config",
"file",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L1315-L1330 | train | 220,938 |
RedHatInsights/insights-core | insights/core/__init__.py | FileListing.path_entry | def path_entry(self, path):
"""
The parsed data given a path, which is separated into its directory
and entry name.
"""
if path[0] != '/':
return None
path_parts = path.split('/')
# Note that here the first element will be '' because it's before the
# first separator. That's OK, the join puts it back together.
directory = '/'.join(path_parts[:-1])
name = path_parts[-1]
if directory not in self.listings:
return None
if name not in self.listings[directory]['entries']:
return None
return self.listings[directory]['entries'][name] | python | def path_entry(self, path):
"""
The parsed data given a path, which is separated into its directory
and entry name.
"""
if path[0] != '/':
return None
path_parts = path.split('/')
# Note that here the first element will be '' because it's before the
# first separator. That's OK, the join puts it back together.
directory = '/'.join(path_parts[:-1])
name = path_parts[-1]
if directory not in self.listings:
return None
if name not in self.listings[directory]['entries']:
return None
return self.listings[directory]['entries'][name] | [
"def",
"path_entry",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"[",
"0",
"]",
"!=",
"'/'",
":",
"return",
"None",
"path_parts",
"=",
"path",
".",
"split",
"(",
"'/'",
")",
"# Note that here the first element will be '' because it's before the",
"# first ... | The parsed data given a path, which is separated into its directory
and entry name. | [
"The",
"parsed",
"data",
"given",
"a",
"path",
"which",
"is",
"separated",
"into",
"its",
"directory",
"and",
"entry",
"name",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L1525-L1541 | train | 220,939 |
RedHatInsights/insights-core | insights/client/util.py | image_by_name | def image_by_name(img_name, images=None):
"""
Returns a list of image data for images which match img_name. Will
optionally take a list of images from a docker.Client.images
query to avoid multiple docker queries.
"""
i_reg, i_rep, i_tag = _decompose(img_name)
# Correct for bash-style matching expressions.
if not i_reg:
i_reg = '*'
if not i_tag:
i_tag = '*'
# If the images were not passed in, go get them.
if images is None:
c = docker.Client(**kwargs_from_env())
images = c.images(all=False)
valid_images = []
for i in images:
for t in i['RepoTags']:
reg, rep, tag = _decompose(t)
if matches(reg, i_reg) \
and matches(rep, i_rep) \
and matches(tag, i_tag):
valid_images.append(i)
break
# Some repo after decompose end up with the img_name
# at the end. i.e. rhel7/rsyslog
if rep.endswith(img_name):
valid_images.append(i)
break
return valid_images | python | def image_by_name(img_name, images=None):
"""
Returns a list of image data for images which match img_name. Will
optionally take a list of images from a docker.Client.images
query to avoid multiple docker queries.
"""
i_reg, i_rep, i_tag = _decompose(img_name)
# Correct for bash-style matching expressions.
if not i_reg:
i_reg = '*'
if not i_tag:
i_tag = '*'
# If the images were not passed in, go get them.
if images is None:
c = docker.Client(**kwargs_from_env())
images = c.images(all=False)
valid_images = []
for i in images:
for t in i['RepoTags']:
reg, rep, tag = _decompose(t)
if matches(reg, i_reg) \
and matches(rep, i_rep) \
and matches(tag, i_tag):
valid_images.append(i)
break
# Some repo after decompose end up with the img_name
# at the end. i.e. rhel7/rsyslog
if rep.endswith(img_name):
valid_images.append(i)
break
return valid_images | [
"def",
"image_by_name",
"(",
"img_name",
",",
"images",
"=",
"None",
")",
":",
"i_reg",
",",
"i_rep",
",",
"i_tag",
"=",
"_decompose",
"(",
"img_name",
")",
"# Correct for bash-style matching expressions.",
"if",
"not",
"i_reg",
":",
"i_reg",
"=",
"'*'",
"if",... | Returns a list of image data for images which match img_name. Will
optionally take a list of images from a docker.Client.images
query to avoid multiple docker queries. | [
"Returns",
"a",
"list",
"of",
"image",
"data",
"for",
"images",
"which",
"match",
"img_name",
".",
"Will",
"optionally",
"take",
"a",
"list",
"of",
"images",
"from",
"a",
"docker",
".",
"Client",
".",
"images",
"query",
"to",
"avoid",
"multiple",
"docker",... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/util.py#L31-L64 | train | 220,940 |
RedHatInsights/insights-core | insights/client/util.py | subp | def subp(cmd):
"""
Run a command as a subprocess.
Return a triple of return code, standard out, standard err.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
return ReturnTuple(proc.returncode, stdout=out, stderr=err) | python | def subp(cmd):
"""
Run a command as a subprocess.
Return a triple of return code, standard out, standard err.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
return ReturnTuple(proc.returncode, stdout=out, stderr=err) | [
"def",
"subp",
"(",
"cmd",
")",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"err",
"=",
"proc",
".",
"communicate",
"(",
")... | Run a command as a subprocess.
Return a triple of return code, standard out, standard err. | [
"Run",
"a",
"command",
"as",
"a",
"subprocess",
".",
"Return",
"a",
"triple",
"of",
"return",
"code",
"standard",
"out",
"standard",
"err",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/util.py#L67-L75 | train | 220,941 |
RedHatInsights/insights-core | insights/client/util.py | print_scan_summary | def print_scan_summary(json_data, names=None):
'''
Print a summary of the data returned from a
CVE scan.
'''
max_col_width = 50
min_width = 15
def _max_width(data):
max_name = 0
for name in data:
max_name = len(data[name]) if len(data[name]) > max_name \
else max_name
# If the max name length is less that max_width
if max_name < min_width:
max_name = min_width
# If the man name is greater than the max col leng
# we wish to use
if max_name > max_col_width:
max_name = max_col_width
return max_name
clean = True
if len(names) > 0:
max_width = _max_width(names)
else:
max_width = min_width
template = "{0:" + str(max_width) + "} {1:5} {2:5} {3:5} {4:5}"
sevs = ['critical', 'important', 'moderate', 'low']
writeOut(template.format("Container/Image", "Cri", "Imp", "Med", "Low"))
writeOut(template.format("-" * max_width, "---", "---", "---", "---"))
res_summary = json_data['results_summary']
for image in res_summary.keys():
image_res = res_summary[image]
if 'msg' in image_res.keys():
tmp_tuple = (image_res['msg'], "", "", "", "")
else:
if len(names) < 1:
image_name = image[:max_width]
else:
image_name = names[image][-max_width:]
if len(image_name) == max_col_width:
image_name = '...' + image_name[-(len(image_name) - 3):]
tmp_tuple = tuple([image_name] +
[str(image_res[sev]) for sev in sevs])
sev_results = [image_res[sev] for sev in
sevs if image_res[sev] > 0]
if len(sev_results) > 0:
clean = False
writeOut(template.format(*tmp_tuple))
writeOut("")
return clean | python | def print_scan_summary(json_data, names=None):
'''
Print a summary of the data returned from a
CVE scan.
'''
max_col_width = 50
min_width = 15
def _max_width(data):
max_name = 0
for name in data:
max_name = len(data[name]) if len(data[name]) > max_name \
else max_name
# If the max name length is less that max_width
if max_name < min_width:
max_name = min_width
# If the man name is greater than the max col leng
# we wish to use
if max_name > max_col_width:
max_name = max_col_width
return max_name
clean = True
if len(names) > 0:
max_width = _max_width(names)
else:
max_width = min_width
template = "{0:" + str(max_width) + "} {1:5} {2:5} {3:5} {4:5}"
sevs = ['critical', 'important', 'moderate', 'low']
writeOut(template.format("Container/Image", "Cri", "Imp", "Med", "Low"))
writeOut(template.format("-" * max_width, "---", "---", "---", "---"))
res_summary = json_data['results_summary']
for image in res_summary.keys():
image_res = res_summary[image]
if 'msg' in image_res.keys():
tmp_tuple = (image_res['msg'], "", "", "", "")
else:
if len(names) < 1:
image_name = image[:max_width]
else:
image_name = names[image][-max_width:]
if len(image_name) == max_col_width:
image_name = '...' + image_name[-(len(image_name) - 3):]
tmp_tuple = tuple([image_name] +
[str(image_res[sev]) for sev in sevs])
sev_results = [image_res[sev] for sev in
sevs if image_res[sev] > 0]
if len(sev_results) > 0:
clean = False
writeOut(template.format(*tmp_tuple))
writeOut("")
return clean | [
"def",
"print_scan_summary",
"(",
"json_data",
",",
"names",
"=",
"None",
")",
":",
"max_col_width",
"=",
"50",
"min_width",
"=",
"15",
"def",
"_max_width",
"(",
"data",
")",
":",
"max_name",
"=",
"0",
"for",
"name",
"in",
"data",
":",
"max_name",
"=",
... | Print a summary of the data returned from a
CVE scan. | [
"Print",
"a",
"summary",
"of",
"the",
"data",
"returned",
"from",
"a",
"CVE",
"scan",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/util.py#L98-L153 | train | 220,942 |
RedHatInsights/insights-core | insights/client/util.py | print_detail_scan_summary | def print_detail_scan_summary(json_data, names=None):
'''
Print a detailed summary of the data returned from
a CVE scan.
'''
clean = True
sevs = ['Critical', 'Important', 'Moderate', 'Low']
cve_summary = json_data['host_results']
image_template = " {0:10}: {1}"
cve_template = " {0:10}: {1}"
for image in cve_summary.keys():
image_res = cve_summary[image]
writeOut("")
writeOut(image[:12])
if not image_res['isRHEL']:
writeOut(image_template.format("Result",
"Not based on Red Hat"
"Enterprise Linux"))
continue
else:
writeOut(image_template.format("OS", image_res['os'].rstrip()))
scan_results = image_res['cve_summary']['scan_results']
for sev in sevs:
if sev in scan_results:
clean = False
writeOut(image_template.format(sev,
str(scan_results[sev]['num'])))
for cve in scan_results[sev]['cves']:
writeOut(cve_template.format("CVE", cve['cve_title']))
writeOut(cve_template.format("CVE URL",
cve['cve_ref_url']))
writeOut(cve_template.format("RHSA ID",
cve['rhsa_ref_id']))
writeOut(cve_template.format("RHSA URL",
cve['rhsa_ref_url']))
writeOut("")
return clean | python | def print_detail_scan_summary(json_data, names=None):
'''
Print a detailed summary of the data returned from
a CVE scan.
'''
clean = True
sevs = ['Critical', 'Important', 'Moderate', 'Low']
cve_summary = json_data['host_results']
image_template = " {0:10}: {1}"
cve_template = " {0:10}: {1}"
for image in cve_summary.keys():
image_res = cve_summary[image]
writeOut("")
writeOut(image[:12])
if not image_res['isRHEL']:
writeOut(image_template.format("Result",
"Not based on Red Hat"
"Enterprise Linux"))
continue
else:
writeOut(image_template.format("OS", image_res['os'].rstrip()))
scan_results = image_res['cve_summary']['scan_results']
for sev in sevs:
if sev in scan_results:
clean = False
writeOut(image_template.format(sev,
str(scan_results[sev]['num'])))
for cve in scan_results[sev]['cves']:
writeOut(cve_template.format("CVE", cve['cve_title']))
writeOut(cve_template.format("CVE URL",
cve['cve_ref_url']))
writeOut(cve_template.format("RHSA ID",
cve['rhsa_ref_id']))
writeOut(cve_template.format("RHSA URL",
cve['rhsa_ref_url']))
writeOut("")
return clean | [
"def",
"print_detail_scan_summary",
"(",
"json_data",
",",
"names",
"=",
"None",
")",
":",
"clean",
"=",
"True",
"sevs",
"=",
"[",
"'Critical'",
",",
"'Important'",
",",
"'Moderate'",
",",
"'Low'",
"]",
"cve_summary",
"=",
"json_data",
"[",
"'host_results'",
... | Print a detailed summary of the data returned from
a CVE scan. | [
"Print",
"a",
"detailed",
"summary",
"of",
"the",
"data",
"returned",
"from",
"a",
"CVE",
"scan",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/util.py#L156-L193 | train | 220,943 |
RedHatInsights/insights-core | insights/util/subproc.py | call | def call(cmd,
timeout=None,
signum=signal.SIGKILL,
keep_rc=False,
encoding="utf-8",
env=os.environ):
"""
Execute a cmd or list of commands with an optional timeout in seconds.
If `timeout` is supplied and expires, the process is killed with
SIGKILL (kill -9) and an exception is raised. Otherwise, the command
output is returned.
Parameters
----------
cmd: str or [[str]]
The command(s) to execute
timeout: int
Seconds before kill is issued to the process
signum: int
The signal number to issue to the process on timeout
keep_rc: bool
Whether to return the exit code along with the output
encoding: str
unicode decoding scheme to use. Default is "utf-8"
env: dict
The environment in which to execute commands. Default is os.environ
Returns
-------
str
Content of stdout of cmd on success.
Raises
------
CalledProcessError
Raised when cmd fails
"""
if not isinstance(cmd, list):
cmd = [cmd]
p = Pipeline(*cmd, timeout=timeout, signum=signum, env=env)
res = p(keep_rc=keep_rc)
if keep_rc:
rc, output = res
output = output.decode(encoding, 'ignore')
return rc, output
return res.decode(encoding, "ignore") | python | def call(cmd,
timeout=None,
signum=signal.SIGKILL,
keep_rc=False,
encoding="utf-8",
env=os.environ):
"""
Execute a cmd or list of commands with an optional timeout in seconds.
If `timeout` is supplied and expires, the process is killed with
SIGKILL (kill -9) and an exception is raised. Otherwise, the command
output is returned.
Parameters
----------
cmd: str or [[str]]
The command(s) to execute
timeout: int
Seconds before kill is issued to the process
signum: int
The signal number to issue to the process on timeout
keep_rc: bool
Whether to return the exit code along with the output
encoding: str
unicode decoding scheme to use. Default is "utf-8"
env: dict
The environment in which to execute commands. Default is os.environ
Returns
-------
str
Content of stdout of cmd on success.
Raises
------
CalledProcessError
Raised when cmd fails
"""
if not isinstance(cmd, list):
cmd = [cmd]
p = Pipeline(*cmd, timeout=timeout, signum=signum, env=env)
res = p(keep_rc=keep_rc)
if keep_rc:
rc, output = res
output = output.decode(encoding, 'ignore')
return rc, output
return res.decode(encoding, "ignore") | [
"def",
"call",
"(",
"cmd",
",",
"timeout",
"=",
"None",
",",
"signum",
"=",
"signal",
".",
"SIGKILL",
",",
"keep_rc",
"=",
"False",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"env",
"=",
"os",
".",
"environ",
")",
":",
"if",
"not",
"isinstance",
"(",
... | Execute a cmd or list of commands with an optional timeout in seconds.
If `timeout` is supplied and expires, the process is killed with
SIGKILL (kill -9) and an exception is raised. Otherwise, the command
output is returned.
Parameters
----------
cmd: str or [[str]]
The command(s) to execute
timeout: int
Seconds before kill is issued to the process
signum: int
The signal number to issue to the process on timeout
keep_rc: bool
Whether to return the exit code along with the output
encoding: str
unicode decoding scheme to use. Default is "utf-8"
env: dict
The environment in which to execute commands. Default is os.environ
Returns
-------
str
Content of stdout of cmd on success.
Raises
------
CalledProcessError
Raised when cmd fails | [
"Execute",
"a",
"cmd",
"or",
"list",
"of",
"commands",
"with",
"an",
"optional",
"timeout",
"in",
"seconds",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/subproc.py#L165-L214 | train | 220,944 |
RedHatInsights/insights-core | insights/util/subproc.py | Pipeline.write | def write(self, output, mode="w", keep_rc=False):
"""
Executes the pipeline and writes the results to the supplied output.
If output is a filename and the file didn't already exist before trying
to write, the file will be removed if an exception is raised.
Args:
output (str or file like object): will create a new file of this
name or overwrite an existing file. If output is already a file
like object, it is used.
mode (str): mode to use when creating or opening the provided file
name if it is a string. Ignored if output is a file like object.
Returns:
The final output of the pipeline.
Raises:
CalledProcessError if any return code in the pipeline is nonzero.
"""
if isinstance(output, six.string_types):
already_exists = os.path.exists(output)
try:
with open(output, mode) as f:
p = self._build_pipes(f)
rc = p.wait()
if keep_rc:
return rc
if rc:
raise CalledProcessError(rc, self.cmds[0], "")
except BaseException as be:
if not already_exists and os.path.exists(output):
os.remove(output)
six.reraise(be.__class__, be, sys.exc_info()[2])
else:
p = self._build_pipes(output)
rc = p.wait()
if keep_rc:
return rc
if rc:
raise CalledProcessError(rc, self.cmds[0], "") | python | def write(self, output, mode="w", keep_rc=False):
"""
Executes the pipeline and writes the results to the supplied output.
If output is a filename and the file didn't already exist before trying
to write, the file will be removed if an exception is raised.
Args:
output (str or file like object): will create a new file of this
name or overwrite an existing file. If output is already a file
like object, it is used.
mode (str): mode to use when creating or opening the provided file
name if it is a string. Ignored if output is a file like object.
Returns:
The final output of the pipeline.
Raises:
CalledProcessError if any return code in the pipeline is nonzero.
"""
if isinstance(output, six.string_types):
already_exists = os.path.exists(output)
try:
with open(output, mode) as f:
p = self._build_pipes(f)
rc = p.wait()
if keep_rc:
return rc
if rc:
raise CalledProcessError(rc, self.cmds[0], "")
except BaseException as be:
if not already_exists and os.path.exists(output):
os.remove(output)
six.reraise(be.__class__, be, sys.exc_info()[2])
else:
p = self._build_pipes(output)
rc = p.wait()
if keep_rc:
return rc
if rc:
raise CalledProcessError(rc, self.cmds[0], "") | [
"def",
"write",
"(",
"self",
",",
"output",
",",
"mode",
"=",
"\"w\"",
",",
"keep_rc",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"output",
",",
"six",
".",
"string_types",
")",
":",
"already_exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",... | Executes the pipeline and writes the results to the supplied output.
If output is a filename and the file didn't already exist before trying
to write, the file will be removed if an exception is raised.
Args:
output (str or file like object): will create a new file of this
name or overwrite an existing file. If output is already a file
like object, it is used.
mode (str): mode to use when creating or opening the provided file
name if it is a string. Ignored if output is a file like object.
Returns:
The final output of the pipeline.
Raises:
CalledProcessError if any return code in the pipeline is nonzero. | [
"Executes",
"the",
"pipeline",
"and",
"writes",
"the",
"results",
"to",
"the",
"supplied",
"output",
".",
"If",
"output",
"is",
"a",
"filename",
"and",
"the",
"file",
"didn",
"t",
"already",
"exist",
"before",
"trying",
"to",
"write",
"the",
"file",
"will"... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/subproc.py#L124-L162 | train | 220,945 |
RedHatInsights/insights-core | insights/core/plugins.py | Response.validate_kwargs | def validate_kwargs(self, kwargs):
"""
Validates expected subclass attributes and constructor keyword
arguments.
"""
if not self.response_type:
msg = "response_type must be set on the Response subclass."
raise ValidationException(msg)
if (self.key_name and self.key_name in kwargs) or "type" in kwargs:
name = self.__class__.__name__
msg = "%s is an invalid argument for %s" % (self.key_name, name)
raise ValidationException(msg) | python | def validate_kwargs(self, kwargs):
"""
Validates expected subclass attributes and constructor keyword
arguments.
"""
if not self.response_type:
msg = "response_type must be set on the Response subclass."
raise ValidationException(msg)
if (self.key_name and self.key_name in kwargs) or "type" in kwargs:
name = self.__class__.__name__
msg = "%s is an invalid argument for %s" % (self.key_name, name)
raise ValidationException(msg) | [
"def",
"validate_kwargs",
"(",
"self",
",",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"response_type",
":",
"msg",
"=",
"\"response_type must be set on the Response subclass.\"",
"raise",
"ValidationException",
"(",
"msg",
")",
"if",
"(",
"self",
".",
"key_nam... | Validates expected subclass attributes and constructor keyword
arguments. | [
"Validates",
"expected",
"subclass",
"attributes",
"and",
"constructor",
"keyword",
"arguments",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/plugins.py#L390-L402 | train | 220,946 |
RedHatInsights/insights-core | insights/core/plugins.py | Response.validate_key | def validate_key(self, key):
""" Called if the key_name class attribute is not None. """
if not key:
name = self.__class__.__name__
msg = "%s response missing %s" % (name, self.key_name)
raise ValidationException(msg, self)
elif not isinstance(key, str):
msg = "Response contains invalid %s type" % self.key_name
raise ValidationException(msg, type(key)) | python | def validate_key(self, key):
""" Called if the key_name class attribute is not None. """
if not key:
name = self.__class__.__name__
msg = "%s response missing %s" % (name, self.key_name)
raise ValidationException(msg, self)
elif not isinstance(key, str):
msg = "Response contains invalid %s type" % self.key_name
raise ValidationException(msg, type(key)) | [
"def",
"validate_key",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"key",
":",
"name",
"=",
"self",
".",
"__class__",
".",
"__name__",
"msg",
"=",
"\"%s response missing %s\"",
"%",
"(",
"name",
",",
"self",
".",
"key_name",
")",
"raise",
"Validation... | Called if the key_name class attribute is not None. | [
"Called",
"if",
"the",
"key_name",
"class",
"attribute",
"is",
"not",
"None",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/plugins.py#L404-L412 | train | 220,947 |
RedHatInsights/insights-core | insights/core/plugins.py | Response.adjust_for_length | def adjust_for_length(self, key, r, kwargs):
"""
Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead.
"""
length = len(str(kwargs))
if length > settings.defaults["max_detail_length"]:
self._log_length_error(key, length)
r["max_detail_length_error"] = length
return r
return kwargs | python | def adjust_for_length(self, key, r, kwargs):
"""
Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead.
"""
length = len(str(kwargs))
if length > settings.defaults["max_detail_length"]:
self._log_length_error(key, length)
r["max_detail_length_error"] = length
return r
return kwargs | [
"def",
"adjust_for_length",
"(",
"self",
",",
"key",
",",
"r",
",",
"kwargs",
")",
":",
"length",
"=",
"len",
"(",
"str",
"(",
"kwargs",
")",
")",
"if",
"length",
">",
"settings",
".",
"defaults",
"[",
"\"max_detail_length\"",
"]",
":",
"self",
".",
... | Converts the response to a string and compares its length to a max
length specified in settings. If the response is too long, an error is
logged, and an abbreviated response is returned instead. | [
"Converts",
"the",
"response",
"to",
"a",
"string",
"and",
"compares",
"its",
"length",
"to",
"a",
"max",
"length",
"specified",
"in",
"settings",
".",
"If",
"the",
"response",
"is",
"too",
"long",
"an",
"error",
"is",
"logged",
"and",
"an",
"abbreviated",... | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/plugins.py#L414-L425 | train | 220,948 |
RedHatInsights/insights-core | insights/core/plugins.py | Response._log_length_error | def _log_length_error(self, key, length):
""" Helper function for logging a response length error. """
extra = {
"max_detail_length": settings.defaults["max_detail_length"],
"len": length
}
if self.key_name:
extra[self.key_name] = key
msg = "Length of data in %s is too long." % self.__class__.__name__
log.error(msg, extra=extra) | python | def _log_length_error(self, key, length):
""" Helper function for logging a response length error. """
extra = {
"max_detail_length": settings.defaults["max_detail_length"],
"len": length
}
if self.key_name:
extra[self.key_name] = key
msg = "Length of data in %s is too long." % self.__class__.__name__
log.error(msg, extra=extra) | [
"def",
"_log_length_error",
"(",
"self",
",",
"key",
",",
"length",
")",
":",
"extra",
"=",
"{",
"\"max_detail_length\"",
":",
"settings",
".",
"defaults",
"[",
"\"max_detail_length\"",
"]",
",",
"\"len\"",
":",
"length",
"}",
"if",
"self",
".",
"key_name",
... | Helper function for logging a response length error. | [
"Helper",
"function",
"for",
"logging",
"a",
"response",
"length",
"error",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/plugins.py#L427-L436 | train | 220,949 |
RedHatInsights/insights-core | insights/parsers/oracle.py | _parse_oracle | def _parse_oracle(lines):
"""
Performs the actual file parsing, returning a dict of the config values
in a given Oracle DB config file.
Despite their differences, the two filetypes are similar enough to
allow idential parsing.
"""
config = {}
for line in get_active_lines(lines):
# Check for NULL in line to begin control char removal
if '\00' in line:
line = cleanup.sub('', line)
if '=' in line:
(key, value) = line.split('=', 1)
key = key.strip(whitespace + '"\'').lower()
if ',' in line:
value = [s.strip(whitespace + '"\'').lower() for s in value.split(',')]
else:
value = value.strip(whitespace + '"\'').lower()
config[key] = value
return config | python | def _parse_oracle(lines):
"""
Performs the actual file parsing, returning a dict of the config values
in a given Oracle DB config file.
Despite their differences, the two filetypes are similar enough to
allow idential parsing.
"""
config = {}
for line in get_active_lines(lines):
# Check for NULL in line to begin control char removal
if '\00' in line:
line = cleanup.sub('', line)
if '=' in line:
(key, value) = line.split('=', 1)
key = key.strip(whitespace + '"\'').lower()
if ',' in line:
value = [s.strip(whitespace + '"\'').lower() for s in value.split(',')]
else:
value = value.strip(whitespace + '"\'').lower()
config[key] = value
return config | [
"def",
"_parse_oracle",
"(",
"lines",
")",
":",
"config",
"=",
"{",
"}",
"for",
"line",
"in",
"get_active_lines",
"(",
"lines",
")",
":",
"# Check for NULL in line to begin control char removal",
"if",
"'\\00'",
"in",
"line",
":",
"line",
"=",
"cleanup",
".",
... | Performs the actual file parsing, returning a dict of the config values
in a given Oracle DB config file.
Despite their differences, the two filetypes are similar enough to
allow idential parsing. | [
"Performs",
"the",
"actual",
"file",
"parsing",
"returning",
"a",
"dict",
"of",
"the",
"config",
"values",
"in",
"a",
"given",
"Oracle",
"DB",
"config",
"file",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/oracle.py#L18-L41 | train | 220,950 |
RedHatInsights/insights-core | insights/parsers/mdstat.py | apply_upstring | def apply_upstring(upstring, component_list):
"""Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If there the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method.
"""
assert len(upstring) == len(component_list)
def add_up_key(comp_dict, up_indicator):
assert up_indicator == 'U' or up_indicator == "_"
comp_dict['up'] = up_indicator == 'U'
for comp_dict, up_indicator in zip(component_list, upstring):
add_up_key(comp_dict, up_indicator) | python | def apply_upstring(upstring, component_list):
"""Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If there the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method.
"""
assert len(upstring) == len(component_list)
def add_up_key(comp_dict, up_indicator):
assert up_indicator == 'U' or up_indicator == "_"
comp_dict['up'] = up_indicator == 'U'
for comp_dict, up_indicator in zip(component_list, upstring):
add_up_key(comp_dict, up_indicator) | [
"def",
"apply_upstring",
"(",
"upstring",
",",
"component_list",
")",
":",
"assert",
"len",
"(",
"upstring",
")",
"==",
"len",
"(",
"component_list",
")",
"def",
"add_up_key",
"(",
"comp_dict",
",",
"up_indicator",
")",
":",
"assert",
"up_indicator",
"==",
"... | Update the dictionaries resulting from ``parse_array_start`` with
the "up" key based on the upstring returned from ``parse_upstring``.
The function assumes that the upstring and component_list parameters
passed in are from the same device array stanza of a
``/proc/mdstat`` file.
The function modifies component_list in place, adding or updating
the value of the "up" key to True if there is a corresponding ``U``
in the upstring string, or to False if there is a corresponding
``_``.
If there the number of rows in component_list does not match the
number of characters in upstring, an ``AssertionError`` is raised.
Parameters
----------
upstring : str
String sequence of ``U``s and ``_``s as determined by the
``parse_upstring`` method
component_list : list
List of dictionaries output from the ``parse_array_start`` method. | [
"Update",
"the",
"dictionaries",
"resulting",
"from",
"parse_array_start",
"with",
"the",
"up",
"key",
"based",
"on",
"the",
"upstring",
"returned",
"from",
"parse_upstring",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/mdstat.py#L344-L377 | train | 220,951 |
RedHatInsights/insights-core | insights/util/fs.py | remove | def remove(path, chmod=False):
"""Remove a file or directory located on the filesystem at path.
If chmod is True, chmod -R 755 is executed on the path
before rm -rf path is called.
Parameters
----------
path : str
file system path to an existing file or directory
chmod : bool
If True, chmod -R 755 is executed on path before
it's removed.
Raises
------
CalledProcessError
If any part of the removal process fails.
"""
if not os.path.exists(path):
return
if chmod:
cmd = "chmod -R 755 %s" % path
subproc.call(cmd)
cmd = 'rm -rf "{p}"'.format(p=path)
subproc.call(cmd) | python | def remove(path, chmod=False):
"""Remove a file or directory located on the filesystem at path.
If chmod is True, chmod -R 755 is executed on the path
before rm -rf path is called.
Parameters
----------
path : str
file system path to an existing file or directory
chmod : bool
If True, chmod -R 755 is executed on path before
it's removed.
Raises
------
CalledProcessError
If any part of the removal process fails.
"""
if not os.path.exists(path):
return
if chmod:
cmd = "chmod -R 755 %s" % path
subproc.call(cmd)
cmd = 'rm -rf "{p}"'.format(p=path)
subproc.call(cmd) | [
"def",
"remove",
"(",
"path",
",",
"chmod",
"=",
"False",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"if",
"chmod",
":",
"cmd",
"=",
"\"chmod -R 755 %s\"",
"%",
"path",
"subproc",
".",
"call",
"(",
"cm... | Remove a file or directory located on the filesystem at path.
If chmod is True, chmod -R 755 is executed on the path
before rm -rf path is called.
Parameters
----------
path : str
file system path to an existing file or directory
chmod : bool
If True, chmod -R 755 is executed on path before
it's removed.
Raises
------
CalledProcessError
If any part of the removal process fails. | [
"Remove",
"a",
"file",
"or",
"directory",
"located",
"on",
"the",
"filesystem",
"at",
"path",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/fs.py#L41-L69 | train | 220,952 |
RedHatInsights/insights-core | insights/util/fs.py | ensure_path | def ensure_path(path, mode=0o777):
"""Ensure that path exists in a multiprocessing safe way.
If the path does not exist, recursively create it and its parent
directories using the provided mode. If the path already exists,
do nothing. The umask is cleared to enable the mode to be set,
and then reset to the original value after the mode is set.
Parameters
----------
path : str
file system path to a non-existent directory
that should be created.
mode : int
octal representation of the mode to use when creating
the directory.
Raises
------
OSError
If os.makedirs raises an OSError for any reason
other than if the directory already exists.
"""
if path:
try:
umask = os.umask(000)
os.makedirs(path, mode)
os.umask(umask)
except OSError as e:
if e.errno != errno.EEXIST:
raise | python | def ensure_path(path, mode=0o777):
"""Ensure that path exists in a multiprocessing safe way.
If the path does not exist, recursively create it and its parent
directories using the provided mode. If the path already exists,
do nothing. The umask is cleared to enable the mode to be set,
and then reset to the original value after the mode is set.
Parameters
----------
path : str
file system path to a non-existent directory
that should be created.
mode : int
octal representation of the mode to use when creating
the directory.
Raises
------
OSError
If os.makedirs raises an OSError for any reason
other than if the directory already exists.
"""
if path:
try:
umask = os.umask(000)
os.makedirs(path, mode)
os.umask(umask)
except OSError as e:
if e.errno != errno.EEXIST:
raise | [
"def",
"ensure_path",
"(",
"path",
",",
"mode",
"=",
"0o777",
")",
":",
"if",
"path",
":",
"try",
":",
"umask",
"=",
"os",
".",
"umask",
"(",
"000",
")",
"os",
".",
"makedirs",
"(",
"path",
",",
"mode",
")",
"os",
".",
"umask",
"(",
"umask",
")... | Ensure that path exists in a multiprocessing safe way.
If the path does not exist, recursively create it and its parent
directories using the provided mode. If the path already exists,
do nothing. The umask is cleared to enable the mode to be set,
and then reset to the original value after the mode is set.
Parameters
----------
path : str
file system path to a non-existent directory
that should be created.
mode : int
octal representation of the mode to use when creating
the directory.
Raises
------
OSError
If os.makedirs raises an OSError for any reason
other than if the directory already exists. | [
"Ensure",
"that",
"path",
"exists",
"in",
"a",
"multiprocessing",
"safe",
"way",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/util/fs.py#L72-L103 | train | 220,953 |
RedHatInsights/insights-core | insights/combiners/uptime.py | uptime | def uptime(ut, facter):
"""Check uptime and facts to get the uptime information.
Prefer uptime to facts.
Returns:
insights.combiners.uptime.Uptime: A named tuple with `currtime`,
`updays`, `uphhmm`, `users`, `loadavg` and `uptime` components.
Raises:
Exception: If no data is available from both of the parsers.
"""
ut = ut
if ut and ut.loadavg:
return Uptime(ut.currtime, ut.updays, ut.uphhmm,
ut.users, ut.loadavg, ut.uptime)
ft = facter
if ft and hasattr(ft, 'uptime_seconds'):
import datetime
secs = int(ft.uptime_seconds)
up_dd = secs // (3600 * 24)
up_hh = (secs % (3600 * 24)) // 3600
up_mm = (secs % 3600) // 60
updays = str(up_dd) if up_dd > 0 else ''
uphhmm = '%02d:%02d' % (up_hh, up_mm)
up_time = datetime.timedelta(seconds=secs)
return Uptime(None, updays, uphhmm, None, None, up_time)
raise Exception("Unable to get uptime information.") | python | def uptime(ut, facter):
"""Check uptime and facts to get the uptime information.
Prefer uptime to facts.
Returns:
insights.combiners.uptime.Uptime: A named tuple with `currtime`,
`updays`, `uphhmm`, `users`, `loadavg` and `uptime` components.
Raises:
Exception: If no data is available from both of the parsers.
"""
ut = ut
if ut and ut.loadavg:
return Uptime(ut.currtime, ut.updays, ut.uphhmm,
ut.users, ut.loadavg, ut.uptime)
ft = facter
if ft and hasattr(ft, 'uptime_seconds'):
import datetime
secs = int(ft.uptime_seconds)
up_dd = secs // (3600 * 24)
up_hh = (secs % (3600 * 24)) // 3600
up_mm = (secs % 3600) // 60
updays = str(up_dd) if up_dd > 0 else ''
uphhmm = '%02d:%02d' % (up_hh, up_mm)
up_time = datetime.timedelta(seconds=secs)
return Uptime(None, updays, uphhmm, None, None, up_time)
raise Exception("Unable to get uptime information.") | [
"def",
"uptime",
"(",
"ut",
",",
"facter",
")",
":",
"ut",
"=",
"ut",
"if",
"ut",
"and",
"ut",
".",
"loadavg",
":",
"return",
"Uptime",
"(",
"ut",
".",
"currtime",
",",
"ut",
".",
"updays",
",",
"ut",
".",
"uphhmm",
",",
"ut",
".",
"users",
","... | Check uptime and facts to get the uptime information.
Prefer uptime to facts.
Returns:
insights.combiners.uptime.Uptime: A named tuple with `currtime`,
`updays`, `uphhmm`, `users`, `loadavg` and `uptime` components.
Raises:
Exception: If no data is available from both of the parsers. | [
"Check",
"uptime",
"and",
"facts",
"to",
"get",
"the",
"uptime",
"information",
"."
] | b57cbf8ed7c089672426ede0441e0a4f789ef4a1 | https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/combiners/uptime.py#L31-L60 | train | 220,954 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/utils/__init__.py | get_node | def get_node(service_name, host_name):
"""Generates Node message from params and system information.
"""
return common_pb2.Node(
identifier=common_pb2.ProcessIdentifier(
host_name=socket.gethostname() if host_name is None
else host_name,
pid=os.getpid(),
start_timestamp=proto_ts_from_datetime(
datetime.datetime.utcnow())),
library_info=common_pb2.LibraryInfo(
language=common_pb2.LibraryInfo.Language.Value('PYTHON'),
exporter_version=EXPORTER_VERSION,
core_library_version=opencensus_version),
service_info=common_pb2.ServiceInfo(name=service_name)) | python | def get_node(service_name, host_name):
"""Generates Node message from params and system information.
"""
return common_pb2.Node(
identifier=common_pb2.ProcessIdentifier(
host_name=socket.gethostname() if host_name is None
else host_name,
pid=os.getpid(),
start_timestamp=proto_ts_from_datetime(
datetime.datetime.utcnow())),
library_info=common_pb2.LibraryInfo(
language=common_pb2.LibraryInfo.Language.Value('PYTHON'),
exporter_version=EXPORTER_VERSION,
core_library_version=opencensus_version),
service_info=common_pb2.ServiceInfo(name=service_name)) | [
"def",
"get_node",
"(",
"service_name",
",",
"host_name",
")",
":",
"return",
"common_pb2",
".",
"Node",
"(",
"identifier",
"=",
"common_pb2",
".",
"ProcessIdentifier",
"(",
"host_name",
"=",
"socket",
".",
"gethostname",
"(",
")",
"if",
"host_name",
"is",
"... | Generates Node message from params and system information. | [
"Generates",
"Node",
"message",
"from",
"params",
"and",
"system",
"information",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/utils/__init__.py#L17-L31 | train | 220,955 |
census-instrumentation/opencensus-python | opencensus/trace/tracers/context_tracer.py | ContextTracer.end_span | def end_span(self, *args, **kwargs):
"""End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span.
"""
cur_span = self.current_span()
if cur_span is None and self._spans_list:
cur_span = self._spans_list[-1]
if cur_span is None:
logging.warning('No active span, cannot do end_span.')
return
cur_span.finish()
self.span_context.span_id = cur_span.parent_span.span_id if \
cur_span.parent_span else None
if isinstance(cur_span.parent_span, trace_span.Span):
execution_context.set_current_span(cur_span.parent_span)
else:
execution_context.set_current_span(None)
with self._spans_list_condition:
if cur_span in self._spans_list:
span_datas = self.get_span_datas(cur_span)
self.exporter.export(span_datas)
self._spans_list.remove(cur_span)
return cur_span | python | def end_span(self, *args, **kwargs):
"""End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span.
"""
cur_span = self.current_span()
if cur_span is None and self._spans_list:
cur_span = self._spans_list[-1]
if cur_span is None:
logging.warning('No active span, cannot do end_span.')
return
cur_span.finish()
self.span_context.span_id = cur_span.parent_span.span_id if \
cur_span.parent_span else None
if isinstance(cur_span.parent_span, trace_span.Span):
execution_context.set_current_span(cur_span.parent_span)
else:
execution_context.set_current_span(None)
with self._spans_list_condition:
if cur_span in self._spans_list:
span_datas = self.get_span_datas(cur_span)
self.exporter.export(span_datas)
self._spans_list.remove(cur_span)
return cur_span | [
"def",
"end_span",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"cur_span",
"=",
"self",
".",
"current_span",
"(",
")",
"if",
"cur_span",
"is",
"None",
"and",
"self",
".",
"_spans_list",
":",
"cur_span",
"=",
"self",
".",
"_spans... | End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span. | [
"End",
"a",
"span",
".",
"Update",
"the",
"span_id",
"in",
"SpanContext",
"to",
"the",
"current",
"span",
"s",
"parent",
"span",
"id",
";",
"Update",
"the",
"current",
"span",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/tracers/context_tracer.py#L99-L126 | train | 220,956 |
census-instrumentation/opencensus-python | opencensus/trace/tracers/context_tracer.py | ContextTracer.add_attribute_to_current_span | def add_attribute_to_current_span(self, attribute_key, attribute_value):
"""Add attribute to current span.
:type attribute_key: str
:param attribute_key: Attribute key.
:type attribute_value:str
:param attribute_value: Attribute value.
"""
current_span = self.current_span()
current_span.add_attribute(attribute_key, attribute_value) | python | def add_attribute_to_current_span(self, attribute_key, attribute_value):
"""Add attribute to current span.
:type attribute_key: str
:param attribute_key: Attribute key.
:type attribute_value:str
:param attribute_value: Attribute value.
"""
current_span = self.current_span()
current_span.add_attribute(attribute_key, attribute_value) | [
"def",
"add_attribute_to_current_span",
"(",
"self",
",",
"attribute_key",
",",
"attribute_value",
")",
":",
"current_span",
"=",
"self",
".",
"current_span",
"(",
")",
"current_span",
".",
"add_attribute",
"(",
"attribute_key",
",",
"attribute_value",
")"
] | Add attribute to current span.
:type attribute_key: str
:param attribute_key: Attribute key.
:type attribute_value:str
:param attribute_value: Attribute value. | [
"Add",
"attribute",
"to",
"current",
"span",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/tracers/context_tracer.py#L137-L147 | train | 220,957 |
census-instrumentation/opencensus-python | opencensus/trace/tracers/context_tracer.py | ContextTracer.get_span_datas | def get_span_datas(self, span):
"""Extracts a list of SpanData tuples from a span
:rtype: list of opencensus.trace.span_data.SpanData
:return list of SpanData tuples
"""
span_datas = [
span_data_module.SpanData(
name=ss.name,
context=self.span_context,
span_id=ss.span_id,
parent_span_id=ss.parent_span.span_id if
ss.parent_span else None,
attributes=ss.attributes,
start_time=ss.start_time,
end_time=ss.end_time,
child_span_count=len(ss.children),
stack_trace=ss.stack_trace,
time_events=ss.time_events,
links=ss.links,
status=ss.status,
same_process_as_parent_span=ss.same_process_as_parent_span,
span_kind=ss.span_kind
)
for ss in span
]
return span_datas | python | def get_span_datas(self, span):
"""Extracts a list of SpanData tuples from a span
:rtype: list of opencensus.trace.span_data.SpanData
:return list of SpanData tuples
"""
span_datas = [
span_data_module.SpanData(
name=ss.name,
context=self.span_context,
span_id=ss.span_id,
parent_span_id=ss.parent_span.span_id if
ss.parent_span else None,
attributes=ss.attributes,
start_time=ss.start_time,
end_time=ss.end_time,
child_span_count=len(ss.children),
stack_trace=ss.stack_trace,
time_events=ss.time_events,
links=ss.links,
status=ss.status,
same_process_as_parent_span=ss.same_process_as_parent_span,
span_kind=ss.span_kind
)
for ss in span
]
return span_datas | [
"def",
"get_span_datas",
"(",
"self",
",",
"span",
")",
":",
"span_datas",
"=",
"[",
"span_data_module",
".",
"SpanData",
"(",
"name",
"=",
"ss",
".",
"name",
",",
"context",
"=",
"self",
".",
"span_context",
",",
"span_id",
"=",
"ss",
".",
"span_id",
... | Extracts a list of SpanData tuples from a span
:rtype: list of opencensus.trace.span_data.SpanData
:return list of SpanData tuples | [
"Extracts",
"a",
"list",
"of",
"SpanData",
"tuples",
"from",
"a",
"span"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/tracers/context_tracer.py#L149-L176 | train | 220,958 |
census-instrumentation/opencensus-python | opencensus/metrics/export/metric_producer.py | MetricProducerManager.add | def add(self, metric_producer):
"""Add a metric producer.
:type metric_producer: :class: 'MetricProducer'
:param metric_producer: The metric producer to add.
"""
if metric_producer is None:
raise ValueError
with self.mp_lock:
self.metric_producers.add(metric_producer) | python | def add(self, metric_producer):
"""Add a metric producer.
:type metric_producer: :class: 'MetricProducer'
:param metric_producer: The metric producer to add.
"""
if metric_producer is None:
raise ValueError
with self.mp_lock:
self.metric_producers.add(metric_producer) | [
"def",
"add",
"(",
"self",
",",
"metric_producer",
")",
":",
"if",
"metric_producer",
"is",
"None",
":",
"raise",
"ValueError",
"with",
"self",
".",
"mp_lock",
":",
"self",
".",
"metric_producers",
".",
"add",
"(",
"metric_producer",
")"
] | Add a metric producer.
:type metric_producer: :class: 'MetricProducer'
:param metric_producer: The metric producer to add. | [
"Add",
"a",
"metric",
"producer",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/metric_producer.py#L44-L53 | train | 220,959 |
census-instrumentation/opencensus-python | opencensus/metrics/export/metric_producer.py | MetricProducerManager.remove | def remove(self, metric_producer):
"""Remove a metric producer.
:type metric_producer: :class: 'MetricProducer'
:param metric_producer: The metric producer to remove.
"""
if metric_producer is None:
raise ValueError
try:
with self.mp_lock:
self.metric_producers.remove(metric_producer)
except KeyError:
pass | python | def remove(self, metric_producer):
"""Remove a metric producer.
:type metric_producer: :class: 'MetricProducer'
:param metric_producer: The metric producer to remove.
"""
if metric_producer is None:
raise ValueError
try:
with self.mp_lock:
self.metric_producers.remove(metric_producer)
except KeyError:
pass | [
"def",
"remove",
"(",
"self",
",",
"metric_producer",
")",
":",
"if",
"metric_producer",
"is",
"None",
":",
"raise",
"ValueError",
"try",
":",
"with",
"self",
".",
"mp_lock",
":",
"self",
".",
"metric_producers",
".",
"remove",
"(",
"metric_producer",
")",
... | Remove a metric producer.
:type metric_producer: :class: 'MetricProducer'
:param metric_producer: The metric producer to remove. | [
"Remove",
"a",
"metric",
"producer",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/metric_producer.py#L55-L67 | train | 220,960 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-grpc/opencensus/ext/grpc/utils.py | add_message_event | def add_message_event(proto_message, span, message_event_type, message_id=1):
"""Adds a MessageEvent to the span based off of the given protobuf
message
"""
span.add_time_event(
time_event=time_event.TimeEvent(
datetime.utcnow(),
message_event=time_event.MessageEvent(
message_id,
type=message_event_type,
uncompressed_size_bytes=proto_message.ByteSize()
)
)
) | python | def add_message_event(proto_message, span, message_event_type, message_id=1):
"""Adds a MessageEvent to the span based off of the given protobuf
message
"""
span.add_time_event(
time_event=time_event.TimeEvent(
datetime.utcnow(),
message_event=time_event.MessageEvent(
message_id,
type=message_event_type,
uncompressed_size_bytes=proto_message.ByteSize()
)
)
) | [
"def",
"add_message_event",
"(",
"proto_message",
",",
"span",
",",
"message_event_type",
",",
"message_id",
"=",
"1",
")",
":",
"span",
".",
"add_time_event",
"(",
"time_event",
"=",
"time_event",
".",
"TimeEvent",
"(",
"datetime",
".",
"utcnow",
"(",
")",
... | Adds a MessageEvent to the span based off of the given protobuf
message | [
"Adds",
"a",
"MessageEvent",
"to",
"the",
"span",
"based",
"off",
"of",
"the",
"given",
"protobuf",
"message"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-grpc/opencensus/ext/grpc/utils.py#L9-L22 | train | 220,961 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-grpc/opencensus/ext/grpc/utils.py | wrap_iter_with_message_events | def wrap_iter_with_message_events(
request_or_response_iter,
span,
message_event_type
):
"""Wraps a request or response iterator to add message events to the span
for each proto message sent or received
"""
for message_id, message in enumerate(request_or_response_iter, start=1):
add_message_event(
proto_message=message,
span=span,
message_event_type=message_event_type,
message_id=message_id
)
yield message | python | def wrap_iter_with_message_events(
request_or_response_iter,
span,
message_event_type
):
"""Wraps a request or response iterator to add message events to the span
for each proto message sent or received
"""
for message_id, message in enumerate(request_or_response_iter, start=1):
add_message_event(
proto_message=message,
span=span,
message_event_type=message_event_type,
message_id=message_id
)
yield message | [
"def",
"wrap_iter_with_message_events",
"(",
"request_or_response_iter",
",",
"span",
",",
"message_event_type",
")",
":",
"for",
"message_id",
",",
"message",
"in",
"enumerate",
"(",
"request_or_response_iter",
",",
"start",
"=",
"1",
")",
":",
"add_message_event",
... | Wraps a request or response iterator to add message events to the span
for each proto message sent or received | [
"Wraps",
"a",
"request",
"or",
"response",
"iterator",
"to",
"add",
"message",
"events",
"to",
"the",
"span",
"for",
"each",
"proto",
"message",
"sent",
"or",
"received"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-grpc/opencensus/ext/grpc/utils.py#L25-L40 | train | 220,962 |
census-instrumentation/opencensus-python | opencensus/trace/stack_trace.py | StackFrame.format_stack_frame_json | def format_stack_frame_json(self):
"""Convert StackFrame object to json format."""
stack_frame_json = {}
stack_frame_json['function_name'] = get_truncatable_str(
self.func_name)
stack_frame_json['original_function_name'] = get_truncatable_str(
self.original_func_name)
stack_frame_json['file_name'] = get_truncatable_str(self.file_name)
stack_frame_json['line_number'] = self.line_num
stack_frame_json['column_number'] = self.col_num
stack_frame_json['load_module'] = {
'module': get_truncatable_str(self.load_module),
'build_id': get_truncatable_str(self.build_id),
}
stack_frame_json['source_version'] = get_truncatable_str(
self.source_version)
return stack_frame_json | python | def format_stack_frame_json(self):
"""Convert StackFrame object to json format."""
stack_frame_json = {}
stack_frame_json['function_name'] = get_truncatable_str(
self.func_name)
stack_frame_json['original_function_name'] = get_truncatable_str(
self.original_func_name)
stack_frame_json['file_name'] = get_truncatable_str(self.file_name)
stack_frame_json['line_number'] = self.line_num
stack_frame_json['column_number'] = self.col_num
stack_frame_json['load_module'] = {
'module': get_truncatable_str(self.load_module),
'build_id': get_truncatable_str(self.build_id),
}
stack_frame_json['source_version'] = get_truncatable_str(
self.source_version)
return stack_frame_json | [
"def",
"format_stack_frame_json",
"(",
"self",
")",
":",
"stack_frame_json",
"=",
"{",
"}",
"stack_frame_json",
"[",
"'function_name'",
"]",
"=",
"get_truncatable_str",
"(",
"self",
".",
"func_name",
")",
"stack_frame_json",
"[",
"'original_function_name'",
"]",
"="... | Convert StackFrame object to json format. | [
"Convert",
"StackFrame",
"object",
"to",
"json",
"format",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/stack_trace.py#L86-L103 | train | 220,963 |
census-instrumentation/opencensus-python | opencensus/trace/stack_trace.py | StackTrace.from_traceback | def from_traceback(cls, tb):
"""Initializes a StackTrace from a python traceback instance"""
stack_trace = cls(
stack_trace_hash_id=generate_hash_id_from_traceback(tb)
)
# use the add_stack_frame so that json formatting is applied
for tb_frame_info in traceback.extract_tb(tb):
filename, line_num, fn_name, _ = tb_frame_info
stack_trace.add_stack_frame(
StackFrame(
func_name=fn_name,
original_func_name=fn_name,
file_name=filename,
line_num=line_num,
col_num=0, # I don't think this is available in python
load_module=filename,
build_id=BUILD_ID,
source_version=SOURCE_VERSION
)
)
return stack_trace | python | def from_traceback(cls, tb):
"""Initializes a StackTrace from a python traceback instance"""
stack_trace = cls(
stack_trace_hash_id=generate_hash_id_from_traceback(tb)
)
# use the add_stack_frame so that json formatting is applied
for tb_frame_info in traceback.extract_tb(tb):
filename, line_num, fn_name, _ = tb_frame_info
stack_trace.add_stack_frame(
StackFrame(
func_name=fn_name,
original_func_name=fn_name,
file_name=filename,
line_num=line_num,
col_num=0, # I don't think this is available in python
load_module=filename,
build_id=BUILD_ID,
source_version=SOURCE_VERSION
)
)
return stack_trace | [
"def",
"from_traceback",
"(",
"cls",
",",
"tb",
")",
":",
"stack_trace",
"=",
"cls",
"(",
"stack_trace_hash_id",
"=",
"generate_hash_id_from_traceback",
"(",
"tb",
")",
")",
"# use the add_stack_frame so that json formatting is applied",
"for",
"tb_frame_info",
"in",
"t... | Initializes a StackTrace from a python traceback instance | [
"Initializes",
"a",
"StackTrace",
"from",
"a",
"python",
"traceback",
"instance"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/stack_trace.py#L134-L154 | train | 220,964 |
census-instrumentation/opencensus-python | opencensus/trace/stack_trace.py | StackTrace.add_stack_frame | def add_stack_frame(self, stack_frame):
"""Add StackFrame to frames list."""
if len(self.stack_frames) >= MAX_FRAMES:
self.dropped_frames_count += 1
else:
self.stack_frames.append(stack_frame.format_stack_frame_json()) | python | def add_stack_frame(self, stack_frame):
"""Add StackFrame to frames list."""
if len(self.stack_frames) >= MAX_FRAMES:
self.dropped_frames_count += 1
else:
self.stack_frames.append(stack_frame.format_stack_frame_json()) | [
"def",
"add_stack_frame",
"(",
"self",
",",
"stack_frame",
")",
":",
"if",
"len",
"(",
"self",
".",
"stack_frames",
")",
">=",
"MAX_FRAMES",
":",
"self",
".",
"dropped_frames_count",
"+=",
"1",
"else",
":",
"self",
".",
"stack_frames",
".",
"append",
"(",
... | Add StackFrame to frames list. | [
"Add",
"StackFrame",
"to",
"frames",
"list",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/stack_trace.py#L156-L161 | train | 220,965 |
census-instrumentation/opencensus-python | opencensus/trace/stack_trace.py | StackTrace.format_stack_trace_json | def format_stack_trace_json(self):
"""Convert a StackTrace object to json format."""
stack_trace_json = {}
if self.stack_frames:
stack_trace_json['stack_frames'] = {
'frame': self.stack_frames,
'dropped_frames_count': self.dropped_frames_count
}
stack_trace_json['stack_trace_hash_id'] = self.stack_trace_hash_id
return stack_trace_json | python | def format_stack_trace_json(self):
"""Convert a StackTrace object to json format."""
stack_trace_json = {}
if self.stack_frames:
stack_trace_json['stack_frames'] = {
'frame': self.stack_frames,
'dropped_frames_count': self.dropped_frames_count
}
stack_trace_json['stack_trace_hash_id'] = self.stack_trace_hash_id
return stack_trace_json | [
"def",
"format_stack_trace_json",
"(",
"self",
")",
":",
"stack_trace_json",
"=",
"{",
"}",
"if",
"self",
".",
"stack_frames",
":",
"stack_trace_json",
"[",
"'stack_frames'",
"]",
"=",
"{",
"'frame'",
":",
"self",
".",
"stack_frames",
",",
"'dropped_frames_count... | Convert a StackTrace object to json format. | [
"Convert",
"a",
"StackTrace",
"object",
"to",
"json",
"format",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/stack_trace.py#L163-L175 | train | 220,966 |
census-instrumentation/opencensus-python | opencensus/stats/view_manager.py | ViewManager.register_view | def register_view(self, view):
"""registers the given view"""
self.measure_to_view_map.register_view(view=view, timestamp=self.time) | python | def register_view(self, view):
"""registers the given view"""
self.measure_to_view_map.register_view(view=view, timestamp=self.time) | [
"def",
"register_view",
"(",
"self",
",",
"view",
")",
":",
"self",
".",
"measure_to_view_map",
".",
"register_view",
"(",
"view",
"=",
"view",
",",
"timestamp",
"=",
"self",
".",
"time",
")"
] | registers the given view | [
"registers",
"the",
"given",
"view"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/view_manager.py#L35-L37 | train | 220,967 |
census-instrumentation/opencensus-python | opencensus/stats/view_manager.py | ViewManager.get_view | def get_view(self, view_name):
"""gets the view given the view name """
return self.measure_to_view_map.get_view(view_name=view_name,
timestamp=self.time) | python | def get_view(self, view_name):
"""gets the view given the view name """
return self.measure_to_view_map.get_view(view_name=view_name,
timestamp=self.time) | [
"def",
"get_view",
"(",
"self",
",",
"view_name",
")",
":",
"return",
"self",
".",
"measure_to_view_map",
".",
"get_view",
"(",
"view_name",
"=",
"view_name",
",",
"timestamp",
"=",
"self",
".",
"time",
")"
] | gets the view given the view name | [
"gets",
"the",
"view",
"given",
"the",
"view",
"name"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/view_manager.py#L39-L42 | train | 220,968 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | new_stats_exporter | def new_stats_exporter(options=None, interval=None):
"""Get a stats exporter and running transport thread.
Create a new `StackdriverStatsExporter` with the given options and start
periodically exporting stats to stackdriver in the background.
Fall back to default auth if `options` is null. This will raise
`google.auth.exceptions.DefaultCredentialsError` if default credentials
aren't configured.
See `opencensus.metrics.transport.get_exporter_thread` for details on the
transport thread.
:type options: :class:`Options`
:param exporter: Options to pass to the exporter
:type interval: int or float
:param interval: Seconds between export calls.
:rtype: :class:`StackdriverStatsExporter`
:return: The newly-created exporter.
"""
if options is None:
_, project_id = google.auth.default()
options = Options(project_id=project_id)
if str(options.project_id).strip() == "":
raise ValueError(ERROR_BLANK_PROJECT_ID)
ci = client_info.ClientInfo(client_library_version=get_user_agent_slug())
client = monitoring_v3.MetricServiceClient(client_info=ci)
exporter = StackdriverStatsExporter(client=client, options=options)
transport.get_exporter_thread(stats.stats, exporter, interval=interval)
return exporter | python | def new_stats_exporter(options=None, interval=None):
"""Get a stats exporter and running transport thread.
Create a new `StackdriverStatsExporter` with the given options and start
periodically exporting stats to stackdriver in the background.
Fall back to default auth if `options` is null. This will raise
`google.auth.exceptions.DefaultCredentialsError` if default credentials
aren't configured.
See `opencensus.metrics.transport.get_exporter_thread` for details on the
transport thread.
:type options: :class:`Options`
:param exporter: Options to pass to the exporter
:type interval: int or float
:param interval: Seconds between export calls.
:rtype: :class:`StackdriverStatsExporter`
:return: The newly-created exporter.
"""
if options is None:
_, project_id = google.auth.default()
options = Options(project_id=project_id)
if str(options.project_id).strip() == "":
raise ValueError(ERROR_BLANK_PROJECT_ID)
ci = client_info.ClientInfo(client_library_version=get_user_agent_slug())
client = monitoring_v3.MetricServiceClient(client_info=ci)
exporter = StackdriverStatsExporter(client=client, options=options)
transport.get_exporter_thread(stats.stats, exporter, interval=interval)
return exporter | [
"def",
"new_stats_exporter",
"(",
"options",
"=",
"None",
",",
"interval",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"_",
",",
"project_id",
"=",
"google",
".",
"auth",
".",
"default",
"(",
")",
"options",
"=",
"Options",
"(",
"project... | Get a stats exporter and running transport thread.
Create a new `StackdriverStatsExporter` with the given options and start
periodically exporting stats to stackdriver in the background.
Fall back to default auth if `options` is null. This will raise
`google.auth.exceptions.DefaultCredentialsError` if default credentials
aren't configured.
See `opencensus.metrics.transport.get_exporter_thread` for details on the
transport thread.
:type options: :class:`Options`
:param exporter: Options to pass to the exporter
:type interval: int or float
:param interval: Seconds between export calls.
:rtype: :class:`StackdriverStatsExporter`
:return: The newly-created exporter. | [
"Get",
"a",
"stats",
"exporter",
"and",
"running",
"transport",
"thread",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L366-L399 | train | 220,969 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | namespaced_view_name | def namespaced_view_name(view_name, metric_prefix):
""" create string to be used as metric type
"""
metric_prefix = metric_prefix or "custom.googleapis.com/opencensus"
return os.path.join(metric_prefix, view_name).replace('\\', '/') | python | def namespaced_view_name(view_name, metric_prefix):
""" create string to be used as metric type
"""
metric_prefix = metric_prefix or "custom.googleapis.com/opencensus"
return os.path.join(metric_prefix, view_name).replace('\\', '/') | [
"def",
"namespaced_view_name",
"(",
"view_name",
",",
"metric_prefix",
")",
":",
"metric_prefix",
"=",
"metric_prefix",
"or",
"\"custom.googleapis.com/opencensus\"",
"return",
"os",
".",
"path",
".",
"join",
"(",
"metric_prefix",
",",
"view_name",
")",
".",
"replace... | create string to be used as metric type | [
"create",
"string",
"to",
"be",
"used",
"as",
"metric",
"type"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L412-L416 | train | 220,970 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | new_label_descriptors | def new_label_descriptors(defaults, keys):
""" create labels for the metric_descriptor
that will be sent to Stackdriver Monitoring
"""
label_descriptors = []
for lk in itertools.chain.from_iterable((defaults.keys(), keys)):
label = {}
label["key"] = sanitize_label(lk.key)
label["description"] = lk.description
label_descriptors.append(label)
return label_descriptors | python | def new_label_descriptors(defaults, keys):
""" create labels for the metric_descriptor
that will be sent to Stackdriver Monitoring
"""
label_descriptors = []
for lk in itertools.chain.from_iterable((defaults.keys(), keys)):
label = {}
label["key"] = sanitize_label(lk.key)
label["description"] = lk.description
label_descriptors.append(label)
return label_descriptors | [
"def",
"new_label_descriptors",
"(",
"defaults",
",",
"keys",
")",
":",
"label_descriptors",
"=",
"[",
"]",
"for",
"lk",
"in",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"(",
"defaults",
".",
"keys",
"(",
")",
",",
"keys",
")",
")",
":",
"la... | create labels for the metric_descriptor
that will be sent to Stackdriver Monitoring | [
"create",
"labels",
"for",
"the",
"metric_descriptor",
"that",
"will",
"be",
"sent",
"to",
"Stackdriver",
"Monitoring"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L419-L430 | train | 220,971 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | sanitize_label | def sanitize_label(text):
"""Remove characters not accepted in labels key
This replaces any non-word characters (alphanumeric or underscore), with
an underscore. It also ensures that the first character is a letter by
prepending with 'key' if necessary, and trims the text to 100 characters.
"""
if not text:
return text
text = re.sub('\\W+', '_', text)
if text[0] in string.digits:
text = "key_" + text
elif text[0] == '_':
text = "key" + text
return text[:100] | python | def sanitize_label(text):
"""Remove characters not accepted in labels key
This replaces any non-word characters (alphanumeric or underscore), with
an underscore. It also ensures that the first character is a letter by
prepending with 'key' if necessary, and trims the text to 100 characters.
"""
if not text:
return text
text = re.sub('\\W+', '_', text)
if text[0] in string.digits:
text = "key_" + text
elif text[0] == '_':
text = "key" + text
return text[:100] | [
"def",
"sanitize_label",
"(",
"text",
")",
":",
"if",
"not",
"text",
":",
"return",
"text",
"text",
"=",
"re",
".",
"sub",
"(",
"'\\\\W+'",
",",
"'_'",
",",
"text",
")",
"if",
"text",
"[",
"0",
"]",
"in",
"string",
".",
"digits",
":",
"text",
"="... | Remove characters not accepted in labels key
This replaces any non-word characters (alphanumeric or underscore), with
an underscore. It also ensures that the first character is a letter by
prepending with 'key' if necessary, and trims the text to 100 characters. | [
"Remove",
"characters",
"not",
"accepted",
"in",
"labels",
"key"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L433-L447 | train | 220,972 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | StackdriverStatsExporter._convert_series | def _convert_series(self, metric, ts):
"""Convert an OC timeseries to a SD series."""
series = monitoring_v3.types.TimeSeries()
series.metric.type = self.get_metric_type(metric.descriptor)
for lk, lv in self.options.default_monitoring_labels.items():
series.metric.labels[lk.key] = lv.value
for key, val in zip(metric.descriptor.label_keys, ts.label_values):
if val.value is not None:
safe_key = sanitize_label(key.key)
series.metric.labels[safe_key] = val.value
set_monitored_resource(series, self.options.resource)
for point in ts.points:
sd_point = series.points.add()
# this just modifies points, no return
self._convert_point(metric, ts, point, sd_point)
return series | python | def _convert_series(self, metric, ts):
"""Convert an OC timeseries to a SD series."""
series = monitoring_v3.types.TimeSeries()
series.metric.type = self.get_metric_type(metric.descriptor)
for lk, lv in self.options.default_monitoring_labels.items():
series.metric.labels[lk.key] = lv.value
for key, val in zip(metric.descriptor.label_keys, ts.label_values):
if val.value is not None:
safe_key = sanitize_label(key.key)
series.metric.labels[safe_key] = val.value
set_monitored_resource(series, self.options.resource)
for point in ts.points:
sd_point = series.points.add()
# this just modifies points, no return
self._convert_point(metric, ts, point, sd_point)
return series | [
"def",
"_convert_series",
"(",
"self",
",",
"metric",
",",
"ts",
")",
":",
"series",
"=",
"monitoring_v3",
".",
"types",
".",
"TimeSeries",
"(",
")",
"series",
".",
"metric",
".",
"type",
"=",
"self",
".",
"get_metric_type",
"(",
"metric",
".",
"descript... | Convert an OC timeseries to a SD series. | [
"Convert",
"an",
"OC",
"timeseries",
"to",
"a",
"SD",
"series",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L172-L191 | train | 220,973 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | StackdriverStatsExporter._convert_point | def _convert_point(self, metric, ts, point, sd_point):
"""Convert an OC metric point to a SD point."""
if (metric.descriptor.type == metric_descriptor.MetricDescriptorType
.CUMULATIVE_DISTRIBUTION):
sd_dist_val = sd_point.value.distribution_value
sd_dist_val.count = point.value.count
sd_dist_val.sum_of_squared_deviation =\
point.value.sum_of_squared_deviation
assert sd_dist_val.bucket_options.explicit_buckets.bounds == []
sd_dist_val.bucket_options.explicit_buckets.bounds.extend(
[0.0] +
list(map(float, point.value.bucket_options.type_.bounds))
)
assert sd_dist_val.bucket_counts == []
sd_dist_val.bucket_counts.extend(
[0] +
[bb.count for bb in point.value.buckets]
)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
# TODO: handle SUMMARY metrics, #567
else: # pragma: NO COVER
raise TypeError("Unsupported metric type: {}"
.format(metric.descriptor.type))
end = point.timestamp
if ts.start_timestamp is None:
start = end
else:
start = datetime.strptime(ts.start_timestamp, EPOCH_PATTERN)
timestamp_start = (start - EPOCH_DATETIME).total_seconds()
timestamp_end = (end - EPOCH_DATETIME).total_seconds()
sd_point.interval.end_time.seconds = int(timestamp_end)
secs = sd_point.interval.end_time.seconds
sd_point.interval.end_time.nanos = int((timestamp_end - secs) * 1e9)
start_time = sd_point.interval.start_time
start_time.seconds = int(timestamp_start)
start_time.nanos = int((timestamp_start - start_time.seconds) * 1e9) | python | def _convert_point(self, metric, ts, point, sd_point):
"""Convert an OC metric point to a SD point."""
if (metric.descriptor.type == metric_descriptor.MetricDescriptorType
.CUMULATIVE_DISTRIBUTION):
sd_dist_val = sd_point.value.distribution_value
sd_dist_val.count = point.value.count
sd_dist_val.sum_of_squared_deviation =\
point.value.sum_of_squared_deviation
assert sd_dist_val.bucket_options.explicit_buckets.bounds == []
sd_dist_val.bucket_options.explicit_buckets.bounds.extend(
[0.0] +
list(map(float, point.value.bucket_options.type_.bounds))
)
assert sd_dist_val.bucket_counts == []
sd_dist_val.bucket_counts.extend(
[0] +
[bb.count for bb in point.value.buckets]
)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_INT64):
sd_point.value.int64_value = int(point.value.value)
elif (metric.descriptor.type ==
metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE):
sd_point.value.double_value = float(point.value.value)
# TODO: handle SUMMARY metrics, #567
else: # pragma: NO COVER
raise TypeError("Unsupported metric type: {}"
.format(metric.descriptor.type))
end = point.timestamp
if ts.start_timestamp is None:
start = end
else:
start = datetime.strptime(ts.start_timestamp, EPOCH_PATTERN)
timestamp_start = (start - EPOCH_DATETIME).total_seconds()
timestamp_end = (end - EPOCH_DATETIME).total_seconds()
sd_point.interval.end_time.seconds = int(timestamp_end)
secs = sd_point.interval.end_time.seconds
sd_point.interval.end_time.nanos = int((timestamp_end - secs) * 1e9)
start_time = sd_point.interval.start_time
start_time.seconds = int(timestamp_start)
start_time.nanos = int((timestamp_start - start_time.seconds) * 1e9) | [
"def",
"_convert_point",
"(",
"self",
",",
"metric",
",",
"ts",
",",
"point",
",",
"sd_point",
")",
":",
"if",
"(",
"metric",
".",
"descriptor",
".",
"type",
"==",
"metric_descriptor",
".",
"MetricDescriptorType",
".",
"CUMULATIVE_DISTRIBUTION",
")",
":",
"s... | Convert an OC metric point to a SD point. | [
"Convert",
"an",
"OC",
"metric",
"point",
"to",
"a",
"SD",
"point",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L193-L252 | train | 220,974 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | StackdriverStatsExporter.get_metric_descriptor | def get_metric_descriptor(self, oc_md):
"""Convert an OC metric descriptor to a SD metric descriptor."""
try:
metric_kind, value_type = OC_MD_TO_SD_TYPE[oc_md.type]
except KeyError:
raise TypeError("Unsupported metric type: {}".format(oc_md.type))
if self.options.metric_prefix:
display_name_prefix = self.options.metric_prefix
else:
display_name_prefix = DEFAULT_DISPLAY_NAME_PREFIX
desc_labels = new_label_descriptors(
self.options.default_monitoring_labels, oc_md.label_keys)
descriptor = monitoring_v3.types.MetricDescriptor(labels=desc_labels)
metric_type = self.get_metric_type(oc_md)
descriptor.type = metric_type
descriptor.metric_kind = metric_kind
descriptor.value_type = value_type
descriptor.description = oc_md.description
descriptor.unit = oc_md.unit
descriptor.name = ("projects/{}/metricDescriptors/{}"
.format(self.options.project_id, metric_type))
descriptor.display_name = ("{}/{}"
.format(display_name_prefix, oc_md.name))
return descriptor | python | def get_metric_descriptor(self, oc_md):
"""Convert an OC metric descriptor to a SD metric descriptor."""
try:
metric_kind, value_type = OC_MD_TO_SD_TYPE[oc_md.type]
except KeyError:
raise TypeError("Unsupported metric type: {}".format(oc_md.type))
if self.options.metric_prefix:
display_name_prefix = self.options.metric_prefix
else:
display_name_prefix = DEFAULT_DISPLAY_NAME_PREFIX
desc_labels = new_label_descriptors(
self.options.default_monitoring_labels, oc_md.label_keys)
descriptor = monitoring_v3.types.MetricDescriptor(labels=desc_labels)
metric_type = self.get_metric_type(oc_md)
descriptor.type = metric_type
descriptor.metric_kind = metric_kind
descriptor.value_type = value_type
descriptor.description = oc_md.description
descriptor.unit = oc_md.unit
descriptor.name = ("projects/{}/metricDescriptors/{}"
.format(self.options.project_id, metric_type))
descriptor.display_name = ("{}/{}"
.format(display_name_prefix, oc_md.name))
return descriptor | [
"def",
"get_metric_descriptor",
"(",
"self",
",",
"oc_md",
")",
":",
"try",
":",
"metric_kind",
",",
"value_type",
"=",
"OC_MD_TO_SD_TYPE",
"[",
"oc_md",
".",
"type",
"]",
"except",
"KeyError",
":",
"raise",
"TypeError",
"(",
"\"Unsupported metric type: {}\"",
"... | Convert an OC metric descriptor to a SD metric descriptor. | [
"Convert",
"an",
"OC",
"metric",
"descriptor",
"to",
"a",
"SD",
"metric",
"descriptor",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L258-L285 | train | 220,975 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py | StackdriverStatsExporter.register_metric_descriptor | def register_metric_descriptor(self, oc_md):
"""Register a metric descriptor with stackdriver."""
metric_type = self.get_metric_type(oc_md)
with self._md_lock:
if metric_type in self._md_cache:
return self._md_cache[metric_type]
descriptor = self.get_metric_descriptor(oc_md)
project_name = self.client.project_path(self.options.project_id)
sd_md = self.client.create_metric_descriptor(project_name, descriptor)
with self._md_lock:
self._md_cache[metric_type] = sd_md
return sd_md | python | def register_metric_descriptor(self, oc_md):
"""Register a metric descriptor with stackdriver."""
metric_type = self.get_metric_type(oc_md)
with self._md_lock:
if metric_type in self._md_cache:
return self._md_cache[metric_type]
descriptor = self.get_metric_descriptor(oc_md)
project_name = self.client.project_path(self.options.project_id)
sd_md = self.client.create_metric_descriptor(project_name, descriptor)
with self._md_lock:
self._md_cache[metric_type] = sd_md
return sd_md | [
"def",
"register_metric_descriptor",
"(",
"self",
",",
"oc_md",
")",
":",
"metric_type",
"=",
"self",
".",
"get_metric_type",
"(",
"oc_md",
")",
"with",
"self",
".",
"_md_lock",
":",
"if",
"metric_type",
"in",
"self",
".",
"_md_cache",
":",
"return",
"self",... | Register a metric descriptor with stackdriver. | [
"Register",
"a",
"metric",
"descriptor",
"with",
"stackdriver",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L287-L299 | train | 220,976 |
census-instrumentation/opencensus-python | opencensus/trace/propagation/google_cloud_format.py | GoogleCloudFormatPropagator.from_headers | def from_headers(self, headers):
"""Generate a SpanContext object using the trace context header.
:type headers: dict
:param headers: HTTP request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
"""
if headers is None:
return SpanContext()
header = headers.get(_TRACE_CONTEXT_HEADER_NAME)
if header is None:
return SpanContext()
header = str(header.encode('utf-8'))
return self.from_header(header) | python | def from_headers(self, headers):
"""Generate a SpanContext object using the trace context header.
:type headers: dict
:param headers: HTTP request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header.
"""
if headers is None:
return SpanContext()
header = headers.get(_TRACE_CONTEXT_HEADER_NAME)
if header is None:
return SpanContext()
header = str(header.encode('utf-8'))
return self.from_header(header) | [
"def",
"from_headers",
"(",
"self",
",",
"headers",
")",
":",
"if",
"headers",
"is",
"None",
":",
"return",
"SpanContext",
"(",
")",
"header",
"=",
"headers",
".",
"get",
"(",
"_TRACE_CONTEXT_HEADER_NAME",
")",
"if",
"header",
"is",
"None",
":",
"return",
... | Generate a SpanContext object using the trace context header.
:type headers: dict
:param headers: HTTP request headers.
:rtype: :class:`~opencensus.trace.span_context.SpanContext`
:returns: SpanContext generated from the trace context header. | [
"Generate",
"a",
"SpanContext",
"object",
"using",
"the",
"trace",
"context",
"header",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/propagation/google_cloud_format.py#L77-L92 | train | 220,977 |
census-instrumentation/opencensus-python | opencensus/trace/propagation/google_cloud_format.py | GoogleCloudFormatPropagator.to_header | def to_header(self, span_context):
"""Convert a SpanContext object to header string.
:type span_context:
:class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext object.
:rtype: str
:returns: A trace context header string in google cloud format.
"""
trace_id = span_context.trace_id
span_id = span_context.span_id
trace_options = span_context.trace_options.trace_options_byte
header = '{}/{};o={}'.format(
trace_id,
span_id,
int(trace_options))
return header | python | def to_header(self, span_context):
"""Convert a SpanContext object to header string.
:type span_context:
:class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext object.
:rtype: str
:returns: A trace context header string in google cloud format.
"""
trace_id = span_context.trace_id
span_id = span_context.span_id
trace_options = span_context.trace_options.trace_options_byte
header = '{}/{};o={}'.format(
trace_id,
span_id,
int(trace_options))
return header | [
"def",
"to_header",
"(",
"self",
",",
"span_context",
")",
":",
"trace_id",
"=",
"span_context",
".",
"trace_id",
"span_id",
"=",
"span_context",
".",
"span_id",
"trace_options",
"=",
"span_context",
".",
"trace_options",
".",
"trace_options_byte",
"header",
"=",
... | Convert a SpanContext object to header string.
:type span_context:
:class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext object.
:rtype: str
:returns: A trace context header string in google cloud format. | [
"Convert",
"a",
"SpanContext",
"object",
"to",
"header",
"string",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/propagation/google_cloud_format.py#L94-L112 | train | 220,978 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py | _extract_annotations_from_span | def _extract_annotations_from_span(span):
"""Extract and convert time event annotations to zipkin annotations"""
if span.time_events is None:
return []
annotations = []
for time_event in span.time_events:
annotation = time_event.annotation
if not annotation:
continue
event_timestamp_mus = timestamp_to_microseconds(time_event.timestamp)
annotations.append({'timestamp': int(round(event_timestamp_mus)),
'value': annotation.description})
return annotations | python | def _extract_annotations_from_span(span):
"""Extract and convert time event annotations to zipkin annotations"""
if span.time_events is None:
return []
annotations = []
for time_event in span.time_events:
annotation = time_event.annotation
if not annotation:
continue
event_timestamp_mus = timestamp_to_microseconds(time_event.timestamp)
annotations.append({'timestamp': int(round(event_timestamp_mus)),
'value': annotation.description})
return annotations | [
"def",
"_extract_annotations_from_span",
"(",
"span",
")",
":",
"if",
"span",
".",
"time_events",
"is",
"None",
":",
"return",
"[",
"]",
"annotations",
"=",
"[",
"]",
"for",
"time_event",
"in",
"span",
".",
"time_events",
":",
"annotation",
"=",
"time_event"... | Extract and convert time event annotations to zipkin annotations | [
"Extract",
"and",
"convert",
"time",
"event",
"annotations",
"to",
"zipkin",
"annotations"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py#L202-L217 | train | 220,979 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py | ZipkinExporter.emit | def emit(self, span_datas):
"""Send SpanData tuples to Zipkin server, default using the v2 API.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
"""
try:
zipkin_spans = self.translate_to_zipkin(span_datas)
result = requests.post(
url=self.url,
data=json.dumps(zipkin_spans),
headers=ZIPKIN_HEADERS)
if result.status_code not in SUCCESS_STATUS_CODE:
logging.error(
"Failed to send spans to Zipkin server! Spans are {}"
.format(zipkin_spans))
except Exception as e: # pragma: NO COVER
logging.error(getattr(e, 'message', e)) | python | def emit(self, span_datas):
"""Send SpanData tuples to Zipkin server, default using the v2 API.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
"""
try:
zipkin_spans = self.translate_to_zipkin(span_datas)
result = requests.post(
url=self.url,
data=json.dumps(zipkin_spans),
headers=ZIPKIN_HEADERS)
if result.status_code not in SUCCESS_STATUS_CODE:
logging.error(
"Failed to send spans to Zipkin server! Spans are {}"
.format(zipkin_spans))
except Exception as e: # pragma: NO COVER
logging.error(getattr(e, 'message', e)) | [
"def",
"emit",
"(",
"self",
",",
"span_datas",
")",
":",
"try",
":",
"zipkin_spans",
"=",
"self",
".",
"translate_to_zipkin",
"(",
"span_datas",
")",
"result",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"self",
".",
"url",
",",
"data",
"=",
"json",... | Send SpanData tuples to Zipkin server, default using the v2 API.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit | [
"Send",
"SpanData",
"tuples",
"to",
"Zipkin",
"server",
"default",
"using",
"the",
"v2",
"API",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py#L99-L120 | train | 220,980 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py | ZipkinExporter.translate_to_zipkin | def translate_to_zipkin(self, span_datas):
"""Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans.
"""
local_endpoint = {
'serviceName': self.service_name,
'port': self.port,
}
if self.ipv4 is not None:
local_endpoint['ipv4'] = self.ipv4
if self.ipv6 is not None:
local_endpoint['ipv6'] = self.ipv6
zipkin_spans = []
for span in span_datas:
# Timestamp in zipkin spans is int of microseconds.
start_timestamp_mus = timestamp_to_microseconds(span.start_time)
end_timestamp_mus = timestamp_to_microseconds(span.end_time)
duration_mus = end_timestamp_mus - start_timestamp_mus
zipkin_span = {
'traceId': span.context.trace_id,
'id': str(span.span_id),
'name': span.name,
'timestamp': int(round(start_timestamp_mus)),
'duration': int(round(duration_mus)),
'localEndpoint': local_endpoint,
'tags': _extract_tags_from_span(span.attributes),
'annotations': _extract_annotations_from_span(span),
}
span_kind = span.span_kind
parent_span_id = span.parent_span_id
if span_kind is not None:
kind = SPAN_KIND_MAP.get(span_kind)
# Zipkin API for span kind only accept
# enum(CLIENT|SERVER|PRODUCER|CONSUMER|Absent)
if kind is not None:
zipkin_span['kind'] = kind
if parent_span_id is not None:
zipkin_span['parentId'] = str(parent_span_id)
zipkin_spans.append(zipkin_span)
return zipkin_spans | python | def translate_to_zipkin(self, span_datas):
"""Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans.
"""
local_endpoint = {
'serviceName': self.service_name,
'port': self.port,
}
if self.ipv4 is not None:
local_endpoint['ipv4'] = self.ipv4
if self.ipv6 is not None:
local_endpoint['ipv6'] = self.ipv6
zipkin_spans = []
for span in span_datas:
# Timestamp in zipkin spans is int of microseconds.
start_timestamp_mus = timestamp_to_microseconds(span.start_time)
end_timestamp_mus = timestamp_to_microseconds(span.end_time)
duration_mus = end_timestamp_mus - start_timestamp_mus
zipkin_span = {
'traceId': span.context.trace_id,
'id': str(span.span_id),
'name': span.name,
'timestamp': int(round(start_timestamp_mus)),
'duration': int(round(duration_mus)),
'localEndpoint': local_endpoint,
'tags': _extract_tags_from_span(span.attributes),
'annotations': _extract_annotations_from_span(span),
}
span_kind = span.span_kind
parent_span_id = span.parent_span_id
if span_kind is not None:
kind = SPAN_KIND_MAP.get(span_kind)
# Zipkin API for span kind only accept
# enum(CLIENT|SERVER|PRODUCER|CONSUMER|Absent)
if kind is not None:
zipkin_span['kind'] = kind
if parent_span_id is not None:
zipkin_span['parentId'] = str(parent_span_id)
zipkin_spans.append(zipkin_span)
return zipkin_spans | [
"def",
"translate_to_zipkin",
"(",
"self",
",",
"span_datas",
")",
":",
"local_endpoint",
"=",
"{",
"'serviceName'",
":",
"self",
".",
"service_name",
",",
"'port'",
":",
"self",
".",
"port",
",",
"}",
"if",
"self",
".",
"ipv4",
"is",
"not",
"None",
":",... | Translate the opencensus spans to zipkin spans.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
:rtype: list
:returns: List of zipkin format spans. | [
"Translate",
"the",
"opencensus",
"spans",
"to",
"zipkin",
"spans",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-zipkin/opencensus/ext/zipkin/trace_exporter/__init__.py#L125-L182 | train | 220,981 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-sqlalchemy/opencensus/ext/sqlalchemy/trace.py | trace_engine | def trace_engine(engine):
"""Register the event before cursor execute and after cursor execute
to the event listner of the engine.
"""
event.listen(engine, 'before_cursor_execute', _before_cursor_execute)
event.listen(engine, 'after_cursor_execute', _after_cursor_execute) | python | def trace_engine(engine):
"""Register the event before cursor execute and after cursor execute
to the event listner of the engine.
"""
event.listen(engine, 'before_cursor_execute', _before_cursor_execute)
event.listen(engine, 'after_cursor_execute', _after_cursor_execute) | [
"def",
"trace_engine",
"(",
"engine",
")",
":",
"event",
".",
"listen",
"(",
"engine",
",",
"'before_cursor_execute'",
",",
"_before_cursor_execute",
")",
"event",
".",
"listen",
"(",
"engine",
",",
"'after_cursor_execute'",
",",
"_after_cursor_execute",
")"
] | Register the event before cursor execute and after cursor execute
to the event listener of the engine. | [
"Register",
"the",
"event",
"before",
"cursor",
"execute",
"and",
"after",
"cursor",
"execute",
"to",
"the",
"event",
"listner",
"of",
"the",
"engine",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-sqlalchemy/opencensus/ext/sqlalchemy/trace.py#L38-L43 | train | 220,982 |
census-instrumentation/opencensus-python | opencensus/metrics/export/time_series.py | TimeSeries.check_points_type | def check_points_type(self, type_class):
"""Check that each point's value is an instance of `type_class`.
`type_class` should typically be a Value type, i.e. one that extends
:class: `opencensus.metrics.export.value.Value`.
:type type_class: type
:param type_class: Type to check against.
:rtype: bool
:return: Whether all points are instances of `type_class`.
"""
for point in self.points:
if (point.value is not None
and not isinstance(point.value, type_class)):
return False
return True | python | def check_points_type(self, type_class):
"""Check that each point's value is an instance of `type_class`.
`type_class` should typically be a Value type, i.e. one that extends
:class: `opencensus.metrics.export.value.Value`.
:type type_class: type
:param type_class: Type to check against.
:rtype: bool
:return: Whether all points are instances of `type_class`.
"""
for point in self.points:
if (point.value is not None
and not isinstance(point.value, type_class)):
return False
return True | [
"def",
"check_points_type",
"(",
"self",
",",
"type_class",
")",
":",
"for",
"point",
"in",
"self",
".",
"points",
":",
"if",
"(",
"point",
".",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"point",
".",
"value",
",",
"type_class",
")... | Check that each point's value is an instance of `type_class`.
`type_class` should typically be a Value type, i.e. one that extends
:class: `opencensus.metrics.export.value.Value`.
:type type_class: type
:param type_class: Type to check against.
:rtype: bool
:return: Whether all points are instances of `type_class`. | [
"Check",
"that",
"each",
"point",
"s",
"value",
"is",
"an",
"instance",
"of",
"type_class",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/time_series.py#L74-L90 | train | 220,983 |
census-instrumentation/opencensus-python | opencensus/log/__init__.py | get_log_attrs | def get_log_attrs():
"""Get logging attributes from the opencensus context.
:rtype: :class:`LogAttrs`
:return: The current span's trace ID, span ID, and sampling decision.
"""
try:
tracer = execution_context.get_opencensus_tracer()
if tracer is None:
raise RuntimeError
except Exception: # noqa
_meta_logger.error("Failed to get opencensus tracer")
return ATTR_DEFAULTS
try:
trace_id = tracer.span_context.trace_id
if trace_id is None:
trace_id = ATTR_DEFAULTS.trace_id
except Exception: # noqa
_meta_logger.error("Failed to get opencensus trace ID")
trace_id = ATTR_DEFAULTS.trace_id
try:
span_id = tracer.span_context.span_id
if span_id is None:
span_id = ATTR_DEFAULTS.span_id
except Exception: # noqa
_meta_logger.error("Failed to get opencensus span ID")
span_id = ATTR_DEFAULTS.span_id
try:
sampling_decision = tracer.span_context.trace_options.get_enabled
if sampling_decision is None:
sampling_decision = ATTR_DEFAULTS.sampling_decision
except AttributeError:
sampling_decision = ATTR_DEFAULTS.sampling_decision
except Exception: # noqa
_meta_logger.error("Failed to get opencensus sampling decision")
sampling_decision = ATTR_DEFAULTS.sampling_decision
return LogAttrs(trace_id, span_id, sampling_decision) | python | def get_log_attrs():
"""Get logging attributes from the opencensus context.
:rtype: :class:`LogAttrs`
:return: The current span's trace ID, span ID, and sampling decision.
"""
try:
tracer = execution_context.get_opencensus_tracer()
if tracer is None:
raise RuntimeError
except Exception: # noqa
_meta_logger.error("Failed to get opencensus tracer")
return ATTR_DEFAULTS
try:
trace_id = tracer.span_context.trace_id
if trace_id is None:
trace_id = ATTR_DEFAULTS.trace_id
except Exception: # noqa
_meta_logger.error("Failed to get opencensus trace ID")
trace_id = ATTR_DEFAULTS.trace_id
try:
span_id = tracer.span_context.span_id
if span_id is None:
span_id = ATTR_DEFAULTS.span_id
except Exception: # noqa
_meta_logger.error("Failed to get opencensus span ID")
span_id = ATTR_DEFAULTS.span_id
try:
sampling_decision = tracer.span_context.trace_options.get_enabled
if sampling_decision is None:
sampling_decision = ATTR_DEFAULTS.sampling_decision
except AttributeError:
sampling_decision = ATTR_DEFAULTS.sampling_decision
except Exception: # noqa
_meta_logger.error("Failed to get opencensus sampling decision")
sampling_decision = ATTR_DEFAULTS.sampling_decision
return LogAttrs(trace_id, span_id, sampling_decision) | [
"def",
"get_log_attrs",
"(",
")",
":",
"try",
":",
"tracer",
"=",
"execution_context",
".",
"get_opencensus_tracer",
"(",
")",
"if",
"tracer",
"is",
"None",
":",
"raise",
"RuntimeError",
"except",
"Exception",
":",
"# noqa",
"_meta_logger",
".",
"error",
"(",
... | Get logging attributes from the opencensus context.
:rtype: :class:`LogAttrs`
:return: The current span's trace ID, span ID, and sampling decision. | [
"Get",
"logging",
"attributes",
"from",
"the",
"opencensus",
"context",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/log/__init__.py#L33-L73 | train | 220,984 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py | trace_integration | def trace_integration(tracer=None):
"""Trace the Google Cloud Client libraries by integrating with
the transport level including HTTP and gRPC.
"""
log.info('Integrated module: {}'.format(MODULE_NAME))
# Integrate with gRPC
trace_grpc(tracer)
# Integrate with HTTP
trace_http(tracer) | python | def trace_integration(tracer=None):
"""Trace the Google Cloud Client libraries by integrating with
the transport level including HTTP and gRPC.
"""
log.info('Integrated module: {}'.format(MODULE_NAME))
# Integrate with gRPC
trace_grpc(tracer)
# Integrate with HTTP
trace_http(tracer) | [
"def",
"trace_integration",
"(",
"tracer",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"'Integrated module: {}'",
".",
"format",
"(",
"MODULE_NAME",
")",
")",
"# Integrate with gRPC",
"trace_grpc",
"(",
"tracer",
")",
"# Integrate with HTTP",
"trace_http",
"("... | Trace the Google Cloud Client libraries by integrating with
the transport level including HTTP and gRPC. | [
"Trace",
"the",
"Google",
"Cloud",
"Client",
"libraries",
"by",
"integrating",
"with",
"the",
"transport",
"level",
"including",
"HTTP",
"and",
"gRPC",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py#L37-L47 | train | 220,985 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py | trace_grpc | def trace_grpc(tracer=None):
"""Integrate with gRPC."""
# Wrap google.cloud._helpers.make_secure_channel
make_secure_channel_func = getattr(_helpers, MAKE_SECURE_CHANNEL)
make_secure_channel_wrapped = wrap_make_secure_channel(
make_secure_channel_func, tracer)
setattr(
_helpers,
MAKE_SECURE_CHANNEL,
make_secure_channel_wrapped)
# Wrap the grpc.insecure_channel.
insecure_channel_func = getattr(grpc, INSECURE_CHANNEL)
insecure_channel_wrapped = wrap_insecure_channel(
insecure_channel_func, tracer)
setattr(
grpc,
INSECURE_CHANNEL,
insecure_channel_wrapped)
# Wrap google.api_core.grpc_helpers.create_channel
create_channel_func = getattr(grpc_helpers, CREATE_CHANNEL)
create_channel_wrapped = wrap_create_channel(create_channel_func, tracer)
setattr(
grpc_helpers,
CREATE_CHANNEL,
create_channel_wrapped) | python | def trace_grpc(tracer=None):
"""Integrate with gRPC."""
# Wrap google.cloud._helpers.make_secure_channel
make_secure_channel_func = getattr(_helpers, MAKE_SECURE_CHANNEL)
make_secure_channel_wrapped = wrap_make_secure_channel(
make_secure_channel_func, tracer)
setattr(
_helpers,
MAKE_SECURE_CHANNEL,
make_secure_channel_wrapped)
# Wrap the grpc.insecure_channel.
insecure_channel_func = getattr(grpc, INSECURE_CHANNEL)
insecure_channel_wrapped = wrap_insecure_channel(
insecure_channel_func, tracer)
setattr(
grpc,
INSECURE_CHANNEL,
insecure_channel_wrapped)
# Wrap google.api_core.grpc_helpers.create_channel
create_channel_func = getattr(grpc_helpers, CREATE_CHANNEL)
create_channel_wrapped = wrap_create_channel(create_channel_func, tracer)
setattr(
grpc_helpers,
CREATE_CHANNEL,
create_channel_wrapped) | [
"def",
"trace_grpc",
"(",
"tracer",
"=",
"None",
")",
":",
"# Wrap google.cloud._helpers.make_secure_channel",
"make_secure_channel_func",
"=",
"getattr",
"(",
"_helpers",
",",
"MAKE_SECURE_CHANNEL",
")",
"make_secure_channel_wrapped",
"=",
"wrap_make_secure_channel",
"(",
... | Integrate with gRPC. | [
"Integrate",
"with",
"gRPC",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py#L50-L76 | train | 220,986 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py | wrap_make_secure_channel | def wrap_make_secure_channel(make_secure_channel_func, tracer=None):
"""Wrap the google.cloud._helpers.make_secure_channel."""
def call(*args, **kwargs):
channel = make_secure_channel_func(*args, **kwargs)
try:
host = kwargs.get('host')
tracer_interceptor = OpenCensusClientInterceptor(tracer, host)
intercepted_channel = grpc.intercept_channel(
channel, tracer_interceptor)
return intercepted_channel # pragma: NO COVER
except Exception:
log.warning(
'Failed to wrap secure channel, '
'clientlibs grpc calls not traced.')
return channel
return call | python | def wrap_make_secure_channel(make_secure_channel_func, tracer=None):
"""Wrap the google.cloud._helpers.make_secure_channel."""
def call(*args, **kwargs):
channel = make_secure_channel_func(*args, **kwargs)
try:
host = kwargs.get('host')
tracer_interceptor = OpenCensusClientInterceptor(tracer, host)
intercepted_channel = grpc.intercept_channel(
channel, tracer_interceptor)
return intercepted_channel # pragma: NO COVER
except Exception:
log.warning(
'Failed to wrap secure channel, '
'clientlibs grpc calls not traced.')
return channel
return call | [
"def",
"wrap_make_secure_channel",
"(",
"make_secure_channel_func",
",",
"tracer",
"=",
"None",
")",
":",
"def",
"call",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"channel",
"=",
"make_secure_channel_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwar... | Wrap the google.cloud._helpers.make_secure_channel. | [
"Wrap",
"the",
"google",
".",
"cloud",
".",
"_helpers",
".",
"make_secure_channel",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py#L84-L100 | train | 220,987 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py | wrap_insecure_channel | def wrap_insecure_channel(insecure_channel_func, tracer=None):
"""Wrap the grpc.insecure_channel."""
def call(*args, **kwargs):
channel = insecure_channel_func(*args, **kwargs)
try:
target = kwargs.get('target')
tracer_interceptor = OpenCensusClientInterceptor(tracer, target)
intercepted_channel = grpc.intercept_channel(
channel, tracer_interceptor)
return intercepted_channel # pragma: NO COVER
except Exception:
log.warning(
'Failed to wrap insecure channel, '
'clientlibs grpc calls not traced.')
return channel
return call | python | def wrap_insecure_channel(insecure_channel_func, tracer=None):
"""Wrap the grpc.insecure_channel."""
def call(*args, **kwargs):
channel = insecure_channel_func(*args, **kwargs)
try:
target = kwargs.get('target')
tracer_interceptor = OpenCensusClientInterceptor(tracer, target)
intercepted_channel = grpc.intercept_channel(
channel, tracer_interceptor)
return intercepted_channel # pragma: NO COVER
except Exception:
log.warning(
'Failed to wrap insecure channel, '
'clientlibs grpc calls not traced.')
return channel
return call | [
"def",
"wrap_insecure_channel",
"(",
"insecure_channel_func",
",",
"tracer",
"=",
"None",
")",
":",
"def",
"call",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"channel",
"=",
"insecure_channel_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")... | Wrap the grpc.insecure_channel. | [
"Wrap",
"the",
"grpc",
".",
"insecure_channel",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-google-cloud-clientlibs/opencensus/ext/google_cloud_clientlibs/trace.py#L103-L119 | train | 220,988 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py | _convert_reftype_to_jaeger_reftype | def _convert_reftype_to_jaeger_reftype(ref):
"""Convert opencensus reference types to jaeger reference types."""
if ref == link_module.Type.CHILD_LINKED_SPAN:
return jaeger.SpanRefType.CHILD_OF
if ref == link_module.Type.PARENT_LINKED_SPAN:
return jaeger.SpanRefType.FOLLOWS_FROM
return None | python | def _convert_reftype_to_jaeger_reftype(ref):
"""Convert opencensus reference types to jaeger reference types."""
if ref == link_module.Type.CHILD_LINKED_SPAN:
return jaeger.SpanRefType.CHILD_OF
if ref == link_module.Type.PARENT_LINKED_SPAN:
return jaeger.SpanRefType.FOLLOWS_FROM
return None | [
"def",
"_convert_reftype_to_jaeger_reftype",
"(",
"ref",
")",
":",
"if",
"ref",
"==",
"link_module",
".",
"Type",
".",
"CHILD_LINKED_SPAN",
":",
"return",
"jaeger",
".",
"SpanRefType",
".",
"CHILD_OF",
"if",
"ref",
"==",
"link_module",
".",
"Type",
".",
"PAREN... | Convert opencensus reference types to jaeger reference types. | [
"Convert",
"opencensus",
"reference",
"types",
"to",
"jaeger",
"reference",
"types",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py#L238-L244 | train | 220,989 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py | _convert_hex_str_to_int | def _convert_hex_str_to_int(val):
"""Convert hexadecimal formatted ids to signed int64"""
if val is None:
return None
hex_num = int(val, 16)
# ensure it fits into 64-bit
if hex_num > 0x7FFFFFFFFFFFFFFF:
hex_num -= 0x10000000000000000
assert -9223372036854775808 <= hex_num <= 9223372036854775807
return hex_num | python | def _convert_hex_str_to_int(val):
"""Convert hexadecimal formatted ids to signed int64"""
if val is None:
return None
hex_num = int(val, 16)
# ensure it fits into 64-bit
if hex_num > 0x7FFFFFFFFFFFFFFF:
hex_num -= 0x10000000000000000
assert -9223372036854775808 <= hex_num <= 9223372036854775807
return hex_num | [
"def",
"_convert_hex_str_to_int",
"(",
"val",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"None",
"hex_num",
"=",
"int",
"(",
"val",
",",
"16",
")",
"# ensure it fits into 64-bit",
"if",
"hex_num",
">",
"0x7FFFFFFFFFFFFFFF",
":",
"hex_num",
"-=",
"0... | Convert hexadecimal formatted ids to signed int64 | [
"Convert",
"hexadecimal",
"formatted",
"ids",
"to",
"signed",
"int64"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py#L247-L258 | train | 220,990 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py | _convert_attribute_to_tag | def _convert_attribute_to_tag(key, attr):
"""Convert the attributes to jaeger tags."""
if isinstance(attr, bool):
return jaeger.Tag(
key=key,
vBool=attr,
vType=jaeger.TagType.BOOL)
if isinstance(attr, str):
return jaeger.Tag(
key=key,
vStr=attr,
vType=jaeger.TagType.STRING)
if isinstance(attr, int):
return jaeger.Tag(
key=key,
vLong=attr,
vType=jaeger.TagType.LONG)
if isinstance(attr, float):
return jaeger.Tag(
key=key,
vDouble=attr,
vType=jaeger.TagType.DOUBLE)
logging.warn('Could not serialize attribute \
{}:{} to tag'.format(key, attr))
return None | python | def _convert_attribute_to_tag(key, attr):
"""Convert the attributes to jaeger tags."""
if isinstance(attr, bool):
return jaeger.Tag(
key=key,
vBool=attr,
vType=jaeger.TagType.BOOL)
if isinstance(attr, str):
return jaeger.Tag(
key=key,
vStr=attr,
vType=jaeger.TagType.STRING)
if isinstance(attr, int):
return jaeger.Tag(
key=key,
vLong=attr,
vType=jaeger.TagType.LONG)
if isinstance(attr, float):
return jaeger.Tag(
key=key,
vDouble=attr,
vType=jaeger.TagType.DOUBLE)
logging.warn('Could not serialize attribute \
{}:{} to tag'.format(key, attr))
return None | [
"def",
"_convert_attribute_to_tag",
"(",
"key",
",",
"attr",
")",
":",
"if",
"isinstance",
"(",
"attr",
",",
"bool",
")",
":",
"return",
"jaeger",
".",
"Tag",
"(",
"key",
"=",
"key",
",",
"vBool",
"=",
"attr",
",",
"vType",
"=",
"jaeger",
".",
"TagTy... | Convert the attributes to jaeger tags. | [
"Convert",
"the",
"attributes",
"to",
"jaeger",
"tags",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py#L298-L322 | train | 220,991 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py | JaegerExporter.translate_to_jaeger | def translate_to_jaeger(self, span_datas):
"""Translate the spans to Jaeger format.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
"""
top_span = span_datas[0]
trace_id = top_span.context.trace_id if top_span.context is not None \
else None
jaeger_spans = []
for span in span_datas:
start_timestamp_ms = timestamp_to_microseconds(span.start_time)
end_timestamp_ms = timestamp_to_microseconds(span.end_time)
duration_ms = end_timestamp_ms - start_timestamp_ms
tags = _extract_tags(span.attributes)
status = span.status
if status is not None:
tags.append(jaeger.Tag(
key='status.code',
vType=jaeger.TagType.LONG,
vLong=status.code))
tags.append(jaeger.Tag(
key='status.message',
vType=jaeger.TagType.STRING,
vStr=status.message))
refs = _extract_refs_from_span(span)
logs = _extract_logs_from_span(span)
context = span.context
flags = None
if context is not None:
flags = int(context.trace_options.trace_options_byte)
span_id = span.span_id
parent_span_id = span.parent_span_id
jaeger_span = jaeger.Span(
traceIdHigh=_convert_hex_str_to_int(trace_id[0:16]),
traceIdLow=_convert_hex_str_to_int(trace_id[16:32]),
spanId=_convert_hex_str_to_int(span_id),
operationName=span.name,
startTime=int(round(start_timestamp_ms)),
duration=int(round(duration_ms)),
tags=tags,
logs=logs,
references=refs,
flags=flags,
parentSpanId=_convert_hex_str_to_int(parent_span_id or '0'))
jaeger_spans.append(jaeger_span)
return jaeger_spans | python | def translate_to_jaeger(self, span_datas):
"""Translate the spans to Jaeger format.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit
"""
top_span = span_datas[0]
trace_id = top_span.context.trace_id if top_span.context is not None \
else None
jaeger_spans = []
for span in span_datas:
start_timestamp_ms = timestamp_to_microseconds(span.start_time)
end_timestamp_ms = timestamp_to_microseconds(span.end_time)
duration_ms = end_timestamp_ms - start_timestamp_ms
tags = _extract_tags(span.attributes)
status = span.status
if status is not None:
tags.append(jaeger.Tag(
key='status.code',
vType=jaeger.TagType.LONG,
vLong=status.code))
tags.append(jaeger.Tag(
key='status.message',
vType=jaeger.TagType.STRING,
vStr=status.message))
refs = _extract_refs_from_span(span)
logs = _extract_logs_from_span(span)
context = span.context
flags = None
if context is not None:
flags = int(context.trace_options.trace_options_byte)
span_id = span.span_id
parent_span_id = span.parent_span_id
jaeger_span = jaeger.Span(
traceIdHigh=_convert_hex_str_to_int(trace_id[0:16]),
traceIdLow=_convert_hex_str_to_int(trace_id[16:32]),
spanId=_convert_hex_str_to_int(span_id),
operationName=span.name,
startTime=int(round(start_timestamp_ms)),
duration=int(round(duration_ms)),
tags=tags,
logs=logs,
references=refs,
flags=flags,
parentSpanId=_convert_hex_str_to_int(parent_span_id or '0'))
jaeger_spans.append(jaeger_span)
return jaeger_spans | [
"def",
"translate_to_jaeger",
"(",
"self",
",",
"span_datas",
")",
":",
"top_span",
"=",
"span_datas",
"[",
"0",
"]",
"trace_id",
"=",
"top_span",
".",
"context",
".",
"trace_id",
"if",
"top_span",
".",
"context",
"is",
"not",
"None",
"else",
"None",
"jaeg... | Translate the spans to Jaeger format.
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param span_datas:
SpanData tuples to emit | [
"Translate",
"the",
"spans",
"to",
"Jaeger",
"format",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py#L159-L220 | train | 220,992 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py | Collector.emit | def emit(self, batch):
"""Submits batches to Thrift HTTP Server through Binary Protocol.
:type batch:
:class:`~opencensus.ext.jaeger.trace_exporter.gen.jaeger.Batch`
:param batch: Object to emit Jaeger spans.
"""
try:
self.client.submitBatches([batch])
# it will call http_transport.flush() and
# status code and message will be updated
code = self.http_transport.code
msg = self.http_transport.message
if code >= 300 or code < 200:
logging.error("Traces cannot be uploaded;\
HTTP status code: {}, message {}".format(code, msg))
except Exception as e: # pragma: NO COVER
logging.error(getattr(e, 'message', e))
finally:
if self.http_transport.isOpen():
self.http_transport.close() | python | def emit(self, batch):
"""Submits batches to Thrift HTTP Server through Binary Protocol.
:type batch:
:class:`~opencensus.ext.jaeger.trace_exporter.gen.jaeger.Batch`
:param batch: Object to emit Jaeger spans.
"""
try:
self.client.submitBatches([batch])
# it will call http_transport.flush() and
# status code and message will be updated
code = self.http_transport.code
msg = self.http_transport.message
if code >= 300 or code < 200:
logging.error("Traces cannot be uploaded;\
HTTP status code: {}, message {}".format(code, msg))
except Exception as e: # pragma: NO COVER
logging.error(getattr(e, 'message', e))
finally:
if self.http_transport.isOpen():
self.http_transport.close() | [
"def",
"emit",
"(",
"self",
",",
"batch",
")",
":",
"try",
":",
"self",
".",
"client",
".",
"submitBatches",
"(",
"[",
"batch",
"]",
")",
"# it will call http_transport.flush() and",
"# status code and message will be updated",
"code",
"=",
"self",
".",
"http_tran... | Submits batches to Thrift HTTP Server through Binary Protocol.
:type batch:
:class:`~opencensus.ext.jaeger.trace_exporter.gen.jaeger.Batch`
:param batch: Object to emit Jaeger spans. | [
"Submits",
"batches",
"to",
"Thrift",
"HTTP",
"Server",
"through",
"Binary",
"Protocol",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-jaeger/opencensus/ext/jaeger/trace_exporter/__init__.py#L369-L390 | train | 220,993 |
census-instrumentation/opencensus-python | opencensus/stats/view_data.py | ViewData.get_tag_values | def get_tag_values(self, tags, columns):
"""function to get the tag values from tags and columns"""
tag_values = []
i = 0
while i < len(columns):
tag_key = columns[i]
if tag_key in tags:
tag_values.append(tags.get(tag_key))
else:
tag_values.append(None)
i += 1
return tag_values | python | def get_tag_values(self, tags, columns):
"""function to get the tag values from tags and columns"""
tag_values = []
i = 0
while i < len(columns):
tag_key = columns[i]
if tag_key in tags:
tag_values.append(tags.get(tag_key))
else:
tag_values.append(None)
i += 1
return tag_values | [
"def",
"get_tag_values",
"(",
"self",
",",
"tags",
",",
"columns",
")",
":",
"tag_values",
"=",
"[",
"]",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"columns",
")",
":",
"tag_key",
"=",
"columns",
"[",
"i",
"]",
"if",
"tag_key",
"in",
"tags",
... | function to get the tag values from tags and columns | [
"function",
"to",
"get",
"the",
"tag",
"values",
"from",
"tags",
"and",
"columns"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/view_data.py#L72-L83 | train | 220,994 |
census-instrumentation/opencensus-python | opencensus/stats/view_data.py | ViewData.record | def record(self, context, value, timestamp, attachments=None):
"""records the view data against context"""
if context is None:
tags = dict()
else:
tags = context.map
tag_values = self.get_tag_values(tags=tags,
columns=self.view.columns)
tuple_vals = tuple(tag_values)
if tuple_vals not in self.tag_value_aggregation_data_map:
self.tag_value_aggregation_data_map[tuple_vals] = copy.deepcopy(
self.view.aggregation.aggregation_data)
self.tag_value_aggregation_data_map.get(tuple_vals).\
add_sample(value, timestamp, attachments) | python | def record(self, context, value, timestamp, attachments=None):
"""records the view data against context"""
if context is None:
tags = dict()
else:
tags = context.map
tag_values = self.get_tag_values(tags=tags,
columns=self.view.columns)
tuple_vals = tuple(tag_values)
if tuple_vals not in self.tag_value_aggregation_data_map:
self.tag_value_aggregation_data_map[tuple_vals] = copy.deepcopy(
self.view.aggregation.aggregation_data)
self.tag_value_aggregation_data_map.get(tuple_vals).\
add_sample(value, timestamp, attachments) | [
"def",
"record",
"(",
"self",
",",
"context",
",",
"value",
",",
"timestamp",
",",
"attachments",
"=",
"None",
")",
":",
"if",
"context",
"is",
"None",
":",
"tags",
"=",
"dict",
"(",
")",
"else",
":",
"tags",
"=",
"context",
".",
"map",
"tag_values",... | records the view data against context | [
"records",
"the",
"view",
"data",
"against",
"context"
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/view_data.py#L85-L98 | train | 220,995 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-httplib/opencensus/ext/httplib/trace.py | trace_integration | def trace_integration(tracer=None):
"""Wrap the httplib to trace."""
log.info('Integrated module: {}'.format(MODULE_NAME))
# Wrap the httplib request function
request_func = getattr(
httplib.HTTPConnection, HTTPLIB_REQUEST_FUNC)
wrapped_request = wrap_httplib_request(request_func)
setattr(httplib.HTTPConnection, request_func.__name__, wrapped_request)
# Wrap the httplib response function
response_func = getattr(
httplib.HTTPConnection, HTTPLIB_RESPONSE_FUNC)
wrapped_response = wrap_httplib_response(response_func)
setattr(httplib.HTTPConnection, response_func.__name__, wrapped_response) | python | def trace_integration(tracer=None):
"""Wrap the httplib to trace."""
log.info('Integrated module: {}'.format(MODULE_NAME))
# Wrap the httplib request function
request_func = getattr(
httplib.HTTPConnection, HTTPLIB_REQUEST_FUNC)
wrapped_request = wrap_httplib_request(request_func)
setattr(httplib.HTTPConnection, request_func.__name__, wrapped_request)
# Wrap the httplib response function
response_func = getattr(
httplib.HTTPConnection, HTTPLIB_RESPONSE_FUNC)
wrapped_response = wrap_httplib_response(response_func)
setattr(httplib.HTTPConnection, response_func.__name__, wrapped_response) | [
"def",
"trace_integration",
"(",
"tracer",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"'Integrated module: {}'",
".",
"format",
"(",
"MODULE_NAME",
")",
")",
"# Wrap the httplib request function",
"request_func",
"=",
"getattr",
"(",
"httplib",
".",
"HTTPConn... | Wrap the httplib to trace. | [
"Wrap",
"the",
"httplib",
"to",
"trace",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-httplib/opencensus/ext/httplib/trace.py#L41-L55 | train | 220,996 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-httplib/opencensus/ext/httplib/trace.py | wrap_httplib_request | def wrap_httplib_request(request_func):
"""Wrap the httplib request function to trace. Create a new span and update
and close the span in the response later.
"""
def call(self, method, url, body, headers, *args, **kwargs):
_tracer = execution_context.get_opencensus_tracer()
blacklist_hostnames = execution_context.get_opencensus_attr(
'blacklist_hostnames')
dest_url = '{}:{}'.format(self._dns_host, self.port)
if utils.disable_tracing_hostname(dest_url, blacklist_hostnames):
return request_func(self, method, url, body,
headers, *args, **kwargs)
_span = _tracer.start_span()
_span.span_kind = span_module.SpanKind.CLIENT
_span.name = '[httplib]{}'.format(request_func.__name__)
# Add the request url to attributes
_tracer.add_attribute_to_current_span(HTTP_URL, url)
# Add the request method to attributes
_tracer.add_attribute_to_current_span(HTTP_METHOD, method)
# Store the current span id to thread local.
execution_context.set_opencensus_attr(
'httplib/current_span_id', _span.span_id)
try:
headers = headers.copy()
headers.update(_tracer.propagator.to_headers(
_span.context_tracer.span_context))
except Exception: # pragma: NO COVER
pass
return request_func(self, method, url, body, headers, *args, **kwargs)
return call | python | def wrap_httplib_request(request_func):
"""Wrap the httplib request function to trace. Create a new span and update
and close the span in the response later.
"""
def call(self, method, url, body, headers, *args, **kwargs):
_tracer = execution_context.get_opencensus_tracer()
blacklist_hostnames = execution_context.get_opencensus_attr(
'blacklist_hostnames')
dest_url = '{}:{}'.format(self._dns_host, self.port)
if utils.disable_tracing_hostname(dest_url, blacklist_hostnames):
return request_func(self, method, url, body,
headers, *args, **kwargs)
_span = _tracer.start_span()
_span.span_kind = span_module.SpanKind.CLIENT
_span.name = '[httplib]{}'.format(request_func.__name__)
# Add the request url to attributes
_tracer.add_attribute_to_current_span(HTTP_URL, url)
# Add the request method to attributes
_tracer.add_attribute_to_current_span(HTTP_METHOD, method)
# Store the current span id to thread local.
execution_context.set_opencensus_attr(
'httplib/current_span_id', _span.span_id)
try:
headers = headers.copy()
headers.update(_tracer.propagator.to_headers(
_span.context_tracer.span_context))
except Exception: # pragma: NO COVER
pass
return request_func(self, method, url, body, headers, *args, **kwargs)
return call | [
"def",
"wrap_httplib_request",
"(",
"request_func",
")",
":",
"def",
"call",
"(",
"self",
",",
"method",
",",
"url",
",",
"body",
",",
"headers",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_tracer",
"=",
"execution_context",
".",
"get_opencens... | Wrap the httplib request function to trace. Create a new span and update
and close the span in the response later. | [
"Wrap",
"the",
"httplib",
"request",
"function",
"to",
"trace",
".",
"Create",
"a",
"new",
"span",
"and",
"update",
"and",
"close",
"the",
"span",
"in",
"the",
"response",
"later",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-httplib/opencensus/ext/httplib/trace.py#L58-L92 | train | 220,997 |
census-instrumentation/opencensus-python | contrib/opencensus-ext-httplib/opencensus/ext/httplib/trace.py | wrap_httplib_response | def wrap_httplib_response(response_func):
"""Wrap the httplib response function to trace.
If there is a corresponding httplib request span, update and close it.
If not, return the response.
"""
def call(self, *args, **kwargs):
_tracer = execution_context.get_opencensus_tracer()
current_span_id = execution_context.get_opencensus_attr(
'httplib/current_span_id')
span = _tracer.current_span()
# No corresponding request span is found, request not traced.
if not span or span.span_id != current_span_id:
return response_func(self, *args, **kwargs)
result = response_func(self, *args, **kwargs)
# Add the status code to attributes
_tracer.add_attribute_to_current_span(
HTTP_STATUS_CODE, str(result.status))
_tracer.end_span()
return result
return call | python | def wrap_httplib_response(response_func):
"""Wrap the httplib response function to trace.
If there is a corresponding httplib request span, update and close it.
If not, return the response.
"""
def call(self, *args, **kwargs):
_tracer = execution_context.get_opencensus_tracer()
current_span_id = execution_context.get_opencensus_attr(
'httplib/current_span_id')
span = _tracer.current_span()
# No corresponding request span is found, request not traced.
if not span or span.span_id != current_span_id:
return response_func(self, *args, **kwargs)
result = response_func(self, *args, **kwargs)
# Add the status code to attributes
_tracer.add_attribute_to_current_span(
HTTP_STATUS_CODE, str(result.status))
_tracer.end_span()
return result
return call | [
"def",
"wrap_httplib_response",
"(",
"response_func",
")",
":",
"def",
"call",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_tracer",
"=",
"execution_context",
".",
"get_opencensus_tracer",
"(",
")",
"current_span_id",
"=",
"execution_con... | Wrap the httplib response function to trace.
If there is a corresponding httplib request span, update and close it.
If not, return the response. | [
"Wrap",
"the",
"httplib",
"response",
"function",
"to",
"trace",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-httplib/opencensus/ext/httplib/trace.py#L95-L122 | train | 220,998 |
census-instrumentation/opencensus-python | opencensus/metrics/export/gauge.py | get_timeseries_list | def get_timeseries_list(points, timestamp):
"""Convert a list of `GaugePoint`s into a list of `TimeSeries`.
Get a :class:`opencensus.metrics.export.time_series.TimeSeries` for each
measurement in `points`. Each series contains a single
:class:`opencensus.metrics.export.point.Point` that represents the last
recorded value of the measurement.
:type points: list(:class:`GaugePoint`)
:param points: The list of measurements to convert.
:type timestamp: :class:`datetime.datetime`
:param timestamp: Recording time to report, usually the current time.
:rtype: list(:class:`opencensus.metrics.export.time_series.TimeSeries`)
:return: A list of one `TimeSeries` for each point in `points`.
"""
ts_list = []
for lv, gp in points.items():
point = point_module.Point(gp.to_point_value(), timestamp)
ts_list.append(time_series.TimeSeries(lv, [point], timestamp))
return ts_list | python | def get_timeseries_list(points, timestamp):
"""Convert a list of `GaugePoint`s into a list of `TimeSeries`.
Get a :class:`opencensus.metrics.export.time_series.TimeSeries` for each
measurement in `points`. Each series contains a single
:class:`opencensus.metrics.export.point.Point` that represents the last
recorded value of the measurement.
:type points: list(:class:`GaugePoint`)
:param points: The list of measurements to convert.
:type timestamp: :class:`datetime.datetime`
:param timestamp: Recording time to report, usually the current time.
:rtype: list(:class:`opencensus.metrics.export.time_series.TimeSeries`)
:return: A list of one `TimeSeries` for each point in `points`.
"""
ts_list = []
for lv, gp in points.items():
point = point_module.Point(gp.to_point_value(), timestamp)
ts_list.append(time_series.TimeSeries(lv, [point], timestamp))
return ts_list | [
"def",
"get_timeseries_list",
"(",
"points",
",",
"timestamp",
")",
":",
"ts_list",
"=",
"[",
"]",
"for",
"lv",
",",
"gp",
"in",
"points",
".",
"items",
"(",
")",
":",
"point",
"=",
"point_module",
".",
"Point",
"(",
"gp",
".",
"to_point_value",
"(",
... | Convert a list of `GaugePoint`s into a list of `TimeSeries`.
Get a :class:`opencensus.metrics.export.time_series.TimeSeries` for each
measurement in `points`. Each series contains a single
:class:`opencensus.metrics.export.point.Point` that represents the last
recorded value of the measurement.
:type points: list(:class:`GaugePoint`)
:param points: The list of measurements to convert.
:type timestamp: :class:`datetime.datetime`
:param timestamp: Recording time to report, usually the current time.
:rtype: list(:class:`opencensus.metrics.export.time_series.TimeSeries`)
:return: A list of one `TimeSeries` for each point in `points`. | [
"Convert",
"a",
"list",
"of",
"GaugePoint",
"s",
"into",
"a",
"list",
"of",
"TimeSeries",
"."
] | 992b223f7e34c5dcb65922b7d5c827e7a1351e7d | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/gauge.py#L29-L50 | train | 220,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.