repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
singularityhub/singularity-python | singularity/package/clone.py | package_node | def package_node(root=None, name=None):
'''package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use machine's
psutil.disk_partitions()
'''
if name is None:
name = platform.node()
if root is None:
root = "/"
tmpdir = tempfile.mkdtemp()
image = "%s/%s.tgz" %(tmpdir,name)
print("Preparing to package root %s into %s" %(root,name))
cmd = ["tar","--one-file-system","-czvSf", image, root,"--exclude",image]
output = run_command(cmd)
return image | python | def package_node(root=None, name=None):
'''package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use machine's
psutil.disk_partitions()
'''
if name is None:
name = platform.node()
if root is None:
root = "/"
tmpdir = tempfile.mkdtemp()
image = "%s/%s.tgz" %(tmpdir,name)
print("Preparing to package root %s into %s" %(root,name))
cmd = ["tar","--one-file-system","-czvSf", image, root,"--exclude",image]
output = run_command(cmd)
return image | [
"def",
"package_node",
"(",
"root",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"platform",
".",
"node",
"(",
")",
"if",
"root",
"is",
"None",
":",
"root",
"=",
"\"/\"",
"tmpdir",
"=",
"tempfile",
... | package node aims to package a (present working node) for a user into
a container. This assumes that the node is a single partition.
:param root: the root of the node to package, default is /
:param name: the name for the image. If not specified, will use machine's
psutil.disk_partitions() | [
"package",
"node",
"aims",
"to",
"package",
"a",
"(",
"present",
"working",
"node",
")",
"for",
"a",
"user",
"into",
"a",
"container",
".",
"This",
"assumes",
"that",
"the",
"node",
"is",
"a",
"single",
"partition",
".",
":",
"param",
"root",
":",
"the... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/package/clone.py#L30-L53 | train |
singularityhub/singularity-python | singularity/package/clone.py | unpack_node | def unpack_node(image_path,name=None,output_folder=None,size=None):
'''unpackage node is intended to unpackage a node that was packaged with
package_node. The image should be a .tgz file. The general steps are to:
1. Package the node using the package_node function
2. Transfer the package somewhere that Singularity is installed'''
if not image_path.endswith(".tgz"):
bot.error("The image_path should end with .tgz. Did you create with package_node?")
sys.exit(1)
if output_folder is None:
output_folder = os.path.dirname(os.path.abspath(image_path))
image_name = os.path.basename(image_path)
if name is None:
name = image_name.replace('.tgz','.img')
if not name.endswith('.img'):
name = "%s.img" %(name)
bot.debug("Preparing to unpack %s to %s." %(image_name,name))
unpacked_image = "%s/%s" %(output_folder,name)
if not os.path.exists(unpacked_image):
os.mkdir(unpacked_image)
cmd = ["gunzip","-dc",image_path,"|","sudo","singularity","import", unpacked_image]
output = run_command(cmd)
# TODO: singularity mount the container, cleanup files (/etc/fstab,...)
# and add your custom singularity files.
return unpacked_image | python | def unpack_node(image_path,name=None,output_folder=None,size=None):
'''unpackage node is intended to unpackage a node that was packaged with
package_node. The image should be a .tgz file. The general steps are to:
1. Package the node using the package_node function
2. Transfer the package somewhere that Singularity is installed'''
if not image_path.endswith(".tgz"):
bot.error("The image_path should end with .tgz. Did you create with package_node?")
sys.exit(1)
if output_folder is None:
output_folder = os.path.dirname(os.path.abspath(image_path))
image_name = os.path.basename(image_path)
if name is None:
name = image_name.replace('.tgz','.img')
if not name.endswith('.img'):
name = "%s.img" %(name)
bot.debug("Preparing to unpack %s to %s." %(image_name,name))
unpacked_image = "%s/%s" %(output_folder,name)
if not os.path.exists(unpacked_image):
os.mkdir(unpacked_image)
cmd = ["gunzip","-dc",image_path,"|","sudo","singularity","import", unpacked_image]
output = run_command(cmd)
# TODO: singularity mount the container, cleanup files (/etc/fstab,...)
# and add your custom singularity files.
return unpacked_image | [
"def",
"unpack_node",
"(",
"image_path",
",",
"name",
"=",
"None",
",",
"output_folder",
"=",
"None",
",",
"size",
"=",
"None",
")",
":",
"if",
"not",
"image_path",
".",
"endswith",
"(",
"\".tgz\"",
")",
":",
"bot",
".",
"error",
"(",
"\"The image_path s... | unpackage node is intended to unpackage a node that was packaged with
package_node. The image should be a .tgz file. The general steps are to:
1. Package the node using the package_node function
2. Transfer the package somewhere that Singularity is installed | [
"unpackage",
"node",
"is",
"intended",
"to",
"unpackage",
"a",
"node",
"that",
"was",
"packaged",
"with",
"package_node",
".",
"The",
"image",
"should",
"be",
"a",
".",
"tgz",
"file",
".",
"The",
"general",
"steps",
"are",
"to",
":",
"1",
".",
"Package",... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/package/clone.py#L56-L87 | train |
singularityhub/singularity-python | singularity/build/utils.py | get_build_template | def get_build_template(template_name,params=None,to_file=None):
'''get_build template returns a string or file for a particular build template, which is
intended to build a version of a Singularity image on a cloud resource.
:param template_name: the name of the template to retrieve in build/scripts
:param params: (if needed) a dictionary of parameters to substitute in the file
:param to_file: if defined, will write to file. Default returns string.
'''
base = get_installdir()
template_folder = "%s/build/scripts" %(base)
template_file = "%s/%s" %(template_folder,template_name)
if os.path.exists(template_file):
bot.debug("Found template %s" %template_file)
# Implement when needed - substitute params here
# Will need to read in file instead of copying below
# if params != None:
if to_file is not None:
shutil.copyfile(template_file,to_file)
bot.debug("Template file saved to %s" %to_file)
return to_file
# If the user wants a string
content = ''.join(read_file(template_file))
return content
else:
bot.warning("Template %s not found." %template_file) | python | def get_build_template(template_name,params=None,to_file=None):
'''get_build template returns a string or file for a particular build template, which is
intended to build a version of a Singularity image on a cloud resource.
:param template_name: the name of the template to retrieve in build/scripts
:param params: (if needed) a dictionary of parameters to substitute in the file
:param to_file: if defined, will write to file. Default returns string.
'''
base = get_installdir()
template_folder = "%s/build/scripts" %(base)
template_file = "%s/%s" %(template_folder,template_name)
if os.path.exists(template_file):
bot.debug("Found template %s" %template_file)
# Implement when needed - substitute params here
# Will need to read in file instead of copying below
# if params != None:
if to_file is not None:
shutil.copyfile(template_file,to_file)
bot.debug("Template file saved to %s" %to_file)
return to_file
# If the user wants a string
content = ''.join(read_file(template_file))
return content
else:
bot.warning("Template %s not found." %template_file) | [
"def",
"get_build_template",
"(",
"template_name",
",",
"params",
"=",
"None",
",",
"to_file",
"=",
"None",
")",
":",
"base",
"=",
"get_installdir",
"(",
")",
"template_folder",
"=",
"\"%s/build/scripts\"",
"%",
"(",
"base",
")",
"template_file",
"=",
"\"%s/%s... | get_build template returns a string or file for a particular build template, which is
intended to build a version of a Singularity image on a cloud resource.
:param template_name: the name of the template to retrieve in build/scripts
:param params: (if needed) a dictionary of parameters to substitute in the file
:param to_file: if defined, will write to file. Default returns string. | [
"get_build",
"template",
"returns",
"a",
"string",
"or",
"file",
"for",
"a",
"particular",
"build",
"template",
"which",
"is",
"intended",
"to",
"build",
"a",
"version",
"of",
"a",
"Singularity",
"image",
"on",
"a",
"cloud",
"resource",
".",
":",
"param",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/utils.py#L72-L100 | train |
singularityhub/singularity-python | singularity/build/utils.py | sniff_extension | def sniff_extension(file_path,verbose=True):
'''sniff_extension will attempt to determine the file type based on the extension,
and return the proper mimetype
:param file_path: the full path to the file to sniff
:param verbose: print stuff out
'''
mime_types = { "xls": 'application/vnd.ms-excel',
"xlsx": 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
"xml": 'text/xml',
"ods": 'application/vnd.oasis.opendocument.spreadsheet',
"csv": 'text/plain',
"tmpl": 'text/plain',
"pdf": 'application/pdf',
"php": 'application/x-httpd-php',
"jpg": 'image/jpeg',
"png": 'image/png',
"gif": 'image/gif',
"bmp": 'image/bmp',
"txt": 'text/plain',
"doc": 'application/msword',
"js": 'text/js',
"swf": 'application/x-shockwave-flash',
"mp3": 'audio/mpeg',
"zip": 'application/zip',
"simg": 'application/zip',
"rar": 'application/rar',
"tar": 'application/tar',
"arj": 'application/arj',
"cab": 'application/cab',
"html": 'text/html',
"htm": 'text/html',
"default": 'application/octet-stream',
"folder": 'application/vnd.google-apps.folder',
"img" : "application/octet-stream" }
ext = os.path.basename(file_path).split('.')[-1]
mime_type = mime_types.get(ext,None)
if mime_type == None:
mime_type = mime_types['txt']
if verbose==True:
bot.info("%s --> %s" %(file_path, mime_type))
return mime_type | python | def sniff_extension(file_path,verbose=True):
'''sniff_extension will attempt to determine the file type based on the extension,
and return the proper mimetype
:param file_path: the full path to the file to sniff
:param verbose: print stuff out
'''
mime_types = { "xls": 'application/vnd.ms-excel',
"xlsx": 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
"xml": 'text/xml',
"ods": 'application/vnd.oasis.opendocument.spreadsheet',
"csv": 'text/plain',
"tmpl": 'text/plain',
"pdf": 'application/pdf',
"php": 'application/x-httpd-php',
"jpg": 'image/jpeg',
"png": 'image/png',
"gif": 'image/gif',
"bmp": 'image/bmp',
"txt": 'text/plain',
"doc": 'application/msword',
"js": 'text/js',
"swf": 'application/x-shockwave-flash',
"mp3": 'audio/mpeg',
"zip": 'application/zip',
"simg": 'application/zip',
"rar": 'application/rar',
"tar": 'application/tar',
"arj": 'application/arj',
"cab": 'application/cab',
"html": 'text/html',
"htm": 'text/html',
"default": 'application/octet-stream',
"folder": 'application/vnd.google-apps.folder',
"img" : "application/octet-stream" }
ext = os.path.basename(file_path).split('.')[-1]
mime_type = mime_types.get(ext,None)
if mime_type == None:
mime_type = mime_types['txt']
if verbose==True:
bot.info("%s --> %s" %(file_path, mime_type))
return mime_type | [
"def",
"sniff_extension",
"(",
"file_path",
",",
"verbose",
"=",
"True",
")",
":",
"mime_types",
"=",
"{",
"\"xls\"",
":",
"'application/vnd.ms-excel'",
",",
"\"xlsx\"",
":",
"'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'",
",",
"\"xml\"",
":",
"'t... | sniff_extension will attempt to determine the file type based on the extension,
and return the proper mimetype
:param file_path: the full path to the file to sniff
:param verbose: print stuff out | [
"sniff_extension",
"will",
"attempt",
"to",
"determine",
"the",
"file",
"type",
"based",
"on",
"the",
"extension",
"and",
"return",
"the",
"proper",
"mimetype",
":",
"param",
"file_path",
":",
"the",
"full",
"path",
"to",
"the",
"file",
"to",
"sniff",
":",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/utils.py#L140-L185 | train |
singularityhub/singularity-python | singularity/build/utils.py | get_script | def get_script(script_name):
'''get_script will return a build script_name, if it is included
in singularity/build/scripts, otherwise will alert the user and return None
:param script_name: the name of the script to look for
'''
install_dir = get_installdir()
script_path = "%s/build/scripts/%s" %(install_dir,script_name)
if os.path.exists(script_path):
return script_path
else:
bot.error("Script %s is not included in singularity-python!" %script_path)
return None | python | def get_script(script_name):
'''get_script will return a build script_name, if it is included
in singularity/build/scripts, otherwise will alert the user and return None
:param script_name: the name of the script to look for
'''
install_dir = get_installdir()
script_path = "%s/build/scripts/%s" %(install_dir,script_name)
if os.path.exists(script_path):
return script_path
else:
bot.error("Script %s is not included in singularity-python!" %script_path)
return None | [
"def",
"get_script",
"(",
"script_name",
")",
":",
"install_dir",
"=",
"get_installdir",
"(",
")",
"script_path",
"=",
"\"%s/build/scripts/%s\"",
"%",
"(",
"install_dir",
",",
"script_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"script_path",
")",... | get_script will return a build script_name, if it is included
in singularity/build/scripts, otherwise will alert the user and return None
:param script_name: the name of the script to look for | [
"get_script",
"will",
"return",
"a",
"build",
"script_name",
"if",
"it",
"is",
"included",
"in",
"singularity",
"/",
"build",
"/",
"scripts",
"otherwise",
"will",
"alert",
"the",
"user",
"and",
"return",
"None",
":",
"param",
"script_name",
":",
"the",
"name... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/utils.py#L188-L199 | train |
singularityhub/singularity-python | singularity/package/utils.py | zip_up | def zip_up(file_list,zip_name,output_folder=None):
'''zip_up will zip up some list of files into a package (.zip)
:param file_list: a list of files to include in the zip.
:param output_folder: the output folder to create the zip in. If not
:param zip_name: the name of the zipfile to return.
specified, a temporary folder will be given.
'''
tmpdir = tempfile.mkdtemp()
# Make a new archive
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
# Write files to zip, depending on type
for filename,content in file_list.items():
bot.debug("Adding %s to package..." %filename)
# If it's the files list, move files into the archive
if filename.lower() == "files":
if not isinstance(content,list):
content = [content]
for copyfile in content:
zf.write(copyfile,os.path.basename(copyfile))
os.remove(copyfile)
else:
output_file = "%s/%s" %(tmpdir, filename)
# If it's a list, write to new file, and save
if isinstance(content,list):
write_file(output_file,"\n".join(content))
# If it's a dict, save to json
elif isinstance(content,dict):
write_json(content,output_file)
# If bytes, need to decode
elif isinstance(content,bytes):
write_file(output_file,content.decode('utf-8'))
# String or other
else:
output_file = write_file(output_file,content)
if os.path.exists(output_file):
zf.write(output_file,filename)
os.remove(output_file)
# Close the zip file
zf.close()
if output_folder is not None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip | python | def zip_up(file_list,zip_name,output_folder=None):
'''zip_up will zip up some list of files into a package (.zip)
:param file_list: a list of files to include in the zip.
:param output_folder: the output folder to create the zip in. If not
:param zip_name: the name of the zipfile to return.
specified, a temporary folder will be given.
'''
tmpdir = tempfile.mkdtemp()
# Make a new archive
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
# Write files to zip, depending on type
for filename,content in file_list.items():
bot.debug("Adding %s to package..." %filename)
# If it's the files list, move files into the archive
if filename.lower() == "files":
if not isinstance(content,list):
content = [content]
for copyfile in content:
zf.write(copyfile,os.path.basename(copyfile))
os.remove(copyfile)
else:
output_file = "%s/%s" %(tmpdir, filename)
# If it's a list, write to new file, and save
if isinstance(content,list):
write_file(output_file,"\n".join(content))
# If it's a dict, save to json
elif isinstance(content,dict):
write_json(content,output_file)
# If bytes, need to decode
elif isinstance(content,bytes):
write_file(output_file,content.decode('utf-8'))
# String or other
else:
output_file = write_file(output_file,content)
if os.path.exists(output_file):
zf.write(output_file,filename)
os.remove(output_file)
# Close the zip file
zf.close()
if output_folder is not None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip | [
"def",
"zip_up",
"(",
"file_list",
",",
"zip_name",
",",
"output_folder",
"=",
"None",
")",
":",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"# Make a new archive ",
"output_zip",
"=",
"\"%s/%s\"",
"%",
"(",
"tmpdir",
",",
"zip_name",
")",
"zf",
... | zip_up will zip up some list of files into a package (.zip)
:param file_list: a list of files to include in the zip.
:param output_folder: the output folder to create the zip in. If not
:param zip_name: the name of the zipfile to return.
specified, a temporary folder will be given. | [
"zip_up",
"will",
"zip",
"up",
"some",
"list",
"of",
"files",
"into",
"a",
"package",
"(",
".",
"zip",
")",
":",
"param",
"file_list",
":",
"a",
"list",
"of",
"files",
"to",
"include",
"in",
"the",
"zip",
".",
":",
"param",
"output_folder",
":",
"the... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/package/utils.py#L52-L110 | train |
singularityhub/singularity-python | singularity/package/utils.py | get_container_contents | def get_container_contents(container, split_delim=None):
'''get_container_contents will return a list of folders and or files
for a container. The environmental variable SINGULARITY_HUB being set
means that container objects are referenced instead of packages
:param container: the container to get content for
:param gets: a list of file names to return, without parent folders
:param split_delim: if defined, will split text by split delimiter
'''
# We will look for everything in guts, then return it
guts = dict()
SINGULARITY_HUB = os.environ.get('SINGULARITY_HUB',"False")
# Visualization deployed local or elsewhere
if SINGULARITY_HUB == "False":
file_obj,tar = get_image_tar(container)
guts = extract_guts(image_path=container, tar=tar)
delete_image_tar(file_obj, tar)
# Visualization deployed by singularity hub
else:
# user has provided a package, but not a container
if container == None:
guts = load_package(image_package,get=gets)
# user has provided a container, but not a package
else:
for sfile in container.files:
for gut_key in gets:
if os.path.basename(sfile['name']) == gut_key:
if split_delim == None:
guts[gut_key] = requests.get(sfile['mediaLink']).text
else:
guts[gut_key] = requests.get(sfile['mediaLink']).text.split(split_delim)
return guts | python | def get_container_contents(container, split_delim=None):
'''get_container_contents will return a list of folders and or files
for a container. The environmental variable SINGULARITY_HUB being set
means that container objects are referenced instead of packages
:param container: the container to get content for
:param gets: a list of file names to return, without parent folders
:param split_delim: if defined, will split text by split delimiter
'''
# We will look for everything in guts, then return it
guts = dict()
SINGULARITY_HUB = os.environ.get('SINGULARITY_HUB',"False")
# Visualization deployed local or elsewhere
if SINGULARITY_HUB == "False":
file_obj,tar = get_image_tar(container)
guts = extract_guts(image_path=container, tar=tar)
delete_image_tar(file_obj, tar)
# Visualization deployed by singularity hub
else:
# user has provided a package, but not a container
if container == None:
guts = load_package(image_package,get=gets)
# user has provided a container, but not a package
else:
for sfile in container.files:
for gut_key in gets:
if os.path.basename(sfile['name']) == gut_key:
if split_delim == None:
guts[gut_key] = requests.get(sfile['mediaLink']).text
else:
guts[gut_key] = requests.get(sfile['mediaLink']).text.split(split_delim)
return guts | [
"def",
"get_container_contents",
"(",
"container",
",",
"split_delim",
"=",
"None",
")",
":",
"# We will look for everything in guts, then return it",
"guts",
"=",
"dict",
"(",
")",
"SINGULARITY_HUB",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'SINGULARITY_HUB'",
... | get_container_contents will return a list of folders and or files
for a container. The environmental variable SINGULARITY_HUB being set
means that container objects are referenced instead of packages
:param container: the container to get content for
:param gets: a list of file names to return, without parent folders
:param split_delim: if defined, will split text by split delimiter | [
"get_container_contents",
"will",
"return",
"a",
"list",
"of",
"folders",
"and",
"or",
"files",
"for",
"a",
"container",
".",
"The",
"environmental",
"variable",
"SINGULARITY_HUB",
"being",
"set",
"means",
"that",
"container",
"objects",
"are",
"referenced",
"inst... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/package/utils.py#L119-L156 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/hash.py | get_image_hashes | def get_image_hashes(image_path, version=None, levels=None):
'''get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level.
'''
if levels is None:
levels = get_levels(version=version)
hashes = dict()
for level_name,level_filter in levels.items():
hashes[level_name] = get_image_hash(image_path,
level_filter=level_filter)
return hashes | python | def get_image_hashes(image_path, version=None, levels=None):
'''get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level.
'''
if levels is None:
levels = get_levels(version=version)
hashes = dict()
for level_name,level_filter in levels.items():
hashes[level_name] = get_image_hash(image_path,
level_filter=level_filter)
return hashes | [
"def",
"get_image_hashes",
"(",
"image_path",
",",
"version",
"=",
"None",
",",
"levels",
"=",
"None",
")",
":",
"if",
"levels",
"is",
"None",
":",
"levels",
"=",
"get_levels",
"(",
"version",
"=",
"version",
")",
"hashes",
"=",
"dict",
"(",
")",
"for"... | get_image_hashes returns the hash for an image across all levels. This is the quickest,
easiest way to define a container's reproducibility on each level. | [
"get_image_hashes",
"returns",
"the",
"hash",
"for",
"an",
"image",
"across",
"all",
"levels",
".",
"This",
"is",
"the",
"quickest",
"easiest",
"way",
"to",
"define",
"a",
"container",
"s",
"reproducibility",
"on",
"each",
"level",
"."
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/hash.py#L42-L52 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/hash.py | get_image_hash | def get_image_hash(image_path,
level=None,level_filter=None,
include_files=None,
skip_files=None,
version=None):
'''get_image_hash will generate a sha1 hash of an image, depending on a level
of reproducibility specified by the user. (see function get_levels for descriptions)
the user can also provide a level_filter manually with level_filter (for custom levels)
:param level: the level of reproducibility to use, which maps to a set regular
expression to match particular files/folders in the image. Choices are in notes.
:param skip_files: an optional list of files to skip
:param include_files: an optional list of files to keep (only if level not defined)
:param version: the version to use. If not defined, default is 2.3
::notes
LEVEL DEFINITIONS
The level definitions come down to including folders/files in the comparison. For files
that Singularity produces on the fly that might be different (timestamps) but equal content
(eg for a replication) we hash the content ("assess_content") instead of the file.
'''
# First get a level dictionary, with description and regexp
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("RECIPE",
version=version,
include_files=include_files,
skip_files=skip_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj, tar = get_image_tar(image_path)
hasher = hashlib.md5()
for member in tar:
member_name = member.name.replace('.','',1)
# For files, we either assess content, or include the file
if member.isdir() or member.issym():
continue
elif assess_content(member,file_filter):
content = extract_content(image_path,member.name)
hasher.update(content)
elif include_file(member,file_filter):
buf = member.tobuf()
hasher.update(buf)
digest = hasher.hexdigest()
# Close up / remove files
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
return digest | python | def get_image_hash(image_path,
level=None,level_filter=None,
include_files=None,
skip_files=None,
version=None):
'''get_image_hash will generate a sha1 hash of an image, depending on a level
of reproducibility specified by the user. (see function get_levels for descriptions)
the user can also provide a level_filter manually with level_filter (for custom levels)
:param level: the level of reproducibility to use, which maps to a set regular
expression to match particular files/folders in the image. Choices are in notes.
:param skip_files: an optional list of files to skip
:param include_files: an optional list of files to keep (only if level not defined)
:param version: the version to use. If not defined, default is 2.3
::notes
LEVEL DEFINITIONS
The level definitions come down to including folders/files in the comparison. For files
that Singularity produces on the fly that might be different (timestamps) but equal content
(eg for a replication) we hash the content ("assess_content") instead of the file.
'''
# First get a level dictionary, with description and regexp
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("RECIPE",
version=version,
include_files=include_files,
skip_files=skip_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj, tar = get_image_tar(image_path)
hasher = hashlib.md5()
for member in tar:
member_name = member.name.replace('.','',1)
# For files, we either assess content, or include the file
if member.isdir() or member.issym():
continue
elif assess_content(member,file_filter):
content = extract_content(image_path,member.name)
hasher.update(content)
elif include_file(member,file_filter):
buf = member.tobuf()
hasher.update(buf)
digest = hasher.hexdigest()
# Close up / remove files
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
return digest | [
"def",
"get_image_hash",
"(",
"image_path",
",",
"level",
"=",
"None",
",",
"level_filter",
"=",
"None",
",",
"include_files",
"=",
"None",
",",
"skip_files",
"=",
"None",
",",
"version",
"=",
"None",
")",
":",
"# First get a level dictionary, with description and... | get_image_hash will generate a sha1 hash of an image, depending on a level
of reproducibility specified by the user. (see function get_levels for descriptions)
the user can also provide a level_filter manually with level_filter (for custom levels)
:param level: the level of reproducibility to use, which maps to a set regular
expression to match particular files/folders in the image. Choices are in notes.
:param skip_files: an optional list of files to skip
:param include_files: an optional list of files to keep (only if level not defined)
:param version: the version to use. If not defined, default is 2.3
::notes
LEVEL DEFINITIONS
The level definitions come down to including folders/files in the comparison. For files
that Singularity produces on the fly that might be different (timestamps) but equal content
(eg for a replication) we hash the content ("assess_content") instead of the file. | [
"get_image_hash",
"will",
"generate",
"a",
"sha1",
"hash",
"of",
"an",
"image",
"depending",
"on",
"a",
"level",
"of",
"reproducibility",
"specified",
"by",
"the",
"user",
".",
"(",
"see",
"function",
"get_levels",
"for",
"descriptions",
")",
"the",
"user",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/hash.py#L56-L121 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/hash.py | get_content_hashes | def get_content_hashes(image_path,
level=None,
regexp=None,
include_files=None,
tag_root=True,
level_filter=None,
skip_files=None,
version=None,
include_sizes=True):
'''get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes
'''
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("REPLICATE",version=version,
skip_files=skip_files,
include_files=include_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj,tar = get_image_tar(image_path)
results = extract_guts(image_path=image_path,
tar=tar,
file_filter=file_filter,
tag_root=tag_root,
include_sizes=include_sizes)
delete_image_tar(file_obj, tar)
return results | python | def get_content_hashes(image_path,
level=None,
regexp=None,
include_files=None,
tag_root=True,
level_filter=None,
skip_files=None,
version=None,
include_sizes=True):
'''get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes
'''
if level_filter is not None:
file_filter = level_filter
elif level is None:
file_filter = get_level("REPLICATE",version=version,
skip_files=skip_files,
include_files=include_files)
else:
file_filter = get_level(level,version=version,
skip_files=skip_files,
include_files=include_files)
file_obj,tar = get_image_tar(image_path)
results = extract_guts(image_path=image_path,
tar=tar,
file_filter=file_filter,
tag_root=tag_root,
include_sizes=include_sizes)
delete_image_tar(file_obj, tar)
return results | [
"def",
"get_content_hashes",
"(",
"image_path",
",",
"level",
"=",
"None",
",",
"regexp",
"=",
"None",
",",
"include_files",
"=",
"None",
",",
"tag_root",
"=",
"True",
",",
"level_filter",
"=",
"None",
",",
"skip_files",
"=",
"None",
",",
"version",
"=",
... | get_content_hashes is like get_image_hash, but it returns a complete dictionary
of file names (keys) and their respective hashes (values). This function is intended
for more research purposes and was used to generate the levels in the first place.
If include_sizes is True, we include a second data structure with sizes | [
"get_content_hashes",
"is",
"like",
"get_image_hash",
"but",
"it",
"returns",
"a",
"complete",
"dictionary",
"of",
"file",
"names",
"(",
"keys",
")",
"and",
"their",
"respective",
"hashes",
"(",
"values",
")",
".",
"This",
"function",
"is",
"intended",
"for",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/hash.py#L124-L162 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/hash.py | get_image_file_hash | def get_image_file_hash(image_path):
'''get_image_hash will return an md5 hash of the file based on a criteria level.
:param level: one of LOW, MEDIUM, HIGH
:param image_path: full path to the singularity image
'''
hasher = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest() | python | def get_image_file_hash(image_path):
'''get_image_hash will return an md5 hash of the file based on a criteria level.
:param level: one of LOW, MEDIUM, HIGH
:param image_path: full path to the singularity image
'''
hasher = hashlib.md5()
with open(image_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest() | [
"def",
"get_image_file_hash",
"(",
"image_path",
")",
":",
"hasher",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"image_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",... | get_image_hash will return an md5 hash of the file based on a criteria level.
:param level: one of LOW, MEDIUM, HIGH
:param image_path: full path to the singularity image | [
"get_image_hash",
"will",
"return",
"an",
"md5",
"hash",
"of",
"the",
"file",
"based",
"on",
"a",
"criteria",
"level",
".",
":",
"param",
"level",
":",
"one",
"of",
"LOW",
"MEDIUM",
"HIGH",
":",
"param",
"image_path",
":",
"full",
"path",
"to",
"the",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/hash.py#L166-L175 | train |
singularityhub/singularity-python | singularity/views/trees.py | container_difference | def container_difference(container=None,container_subtract=None,image_package=None,
image_package_subtract=None,comparison=None):
'''container_difference will return a data structure to render an html
tree (graph) of the differences between two images or packages. The second
container is subtracted from the first
:param container: the primary container object (to subtract from)
:param container_subtract: the second container object to remove
:param image_package: a zipped package for image 1, created with package
:param image_package_subtract: a zipped package for subtraction image, created with package
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
'''
if comparison == None:
comparison = compare_containers(container1=container,
container2=container_subtract,
image_package1=image_package,
image_package2=image_package_subtract,
by=['files.txt','folders.txt'])
files = comparison["files.txt"]['unique1']
folders = comparison['folders.txt']['unique1']
tree = make_container_tree(folders=folders,
files=files)
return tree | python | def container_difference(container=None,container_subtract=None,image_package=None,
image_package_subtract=None,comparison=None):
'''container_difference will return a data structure to render an html
tree (graph) of the differences between two images or packages. The second
container is subtracted from the first
:param container: the primary container object (to subtract from)
:param container_subtract: the second container object to remove
:param image_package: a zipped package for image 1, created with package
:param image_package_subtract: a zipped package for subtraction image, created with package
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
'''
if comparison == None:
comparison = compare_containers(container1=container,
container2=container_subtract,
image_package1=image_package,
image_package2=image_package_subtract,
by=['files.txt','folders.txt'])
files = comparison["files.txt"]['unique1']
folders = comparison['folders.txt']['unique1']
tree = make_container_tree(folders=folders,
files=files)
return tree | [
"def",
"container_difference",
"(",
"container",
"=",
"None",
",",
"container_subtract",
"=",
"None",
",",
"image_package",
"=",
"None",
",",
"image_package_subtract",
"=",
"None",
",",
"comparison",
"=",
"None",
")",
":",
"if",
"comparison",
"==",
"None",
":"... | container_difference will return a data structure to render an html
tree (graph) of the differences between two images or packages. The second
container is subtracted from the first
:param container: the primary container object (to subtract from)
:param container_subtract: the second container object to remove
:param image_package: a zipped package for image 1, created with package
:param image_package_subtract: a zipped package for subtraction image, created with package
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it. | [
"container_difference",
"will",
"return",
"a",
"data",
"structure",
"to",
"render",
"an",
"html",
"tree",
"(",
"graph",
")",
"of",
"the",
"differences",
"between",
"two",
"images",
"or",
"packages",
".",
"The",
"second",
"container",
"is",
"subtracted",
"from"... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L55-L78 | train |
singularityhub/singularity-python | singularity/views/trees.py | container_similarity | def container_similarity(container1=None,container2=None,image_package1=None,
image_package2=None,comparison=None):
'''container_sim will return a data structure to render an html tree
(graph) of the intersection (commonalities) between two images or packages
:param container1: the first container object
:param container2: the second container object if either not defined, need
:param image_package1: a packaged container1 (produced by package)
:param image_package2: a packaged container2 (produced by package)
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
'''
if comparison == None:
comparison = compare_containers(container1=container1,
container2=container2,
image_package1=image_package1,
image_package2=image_package2,
by=['files.txt','folders.txt'])
files = comparison["files.txt"]['intersect']
folders = comparison['folders.txt']['intersect']
tree = make_container_tree(folders=folders,
files=files)
return tree | python | def container_similarity(container1=None,container2=None,image_package1=None,
image_package2=None,comparison=None):
'''container_sim will return a data structure to render an html tree
(graph) of the intersection (commonalities) between two images or packages
:param container1: the first container object
:param container2: the second container object if either not defined, need
:param image_package1: a packaged container1 (produced by package)
:param image_package2: a packaged container2 (produced by package)
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it.
'''
if comparison == None:
comparison = compare_containers(container1=container1,
container2=container2,
image_package1=image_package1,
image_package2=image_package2,
by=['files.txt','folders.txt'])
files = comparison["files.txt"]['intersect']
folders = comparison['folders.txt']['intersect']
tree = make_container_tree(folders=folders,
files=files)
return tree | [
"def",
"container_similarity",
"(",
"container1",
"=",
"None",
",",
"container2",
"=",
"None",
",",
"image_package1",
"=",
"None",
",",
"image_package2",
"=",
"None",
",",
"comparison",
"=",
"None",
")",
":",
"if",
"comparison",
"==",
"None",
":",
"compariso... | container_sim will return a data structure to render an html tree
(graph) of the intersection (commonalities) between two images or packages
:param container1: the first container object
:param container2: the second container object if either not defined, need
:param image_package1: a packaged container1 (produced by package)
:param image_package2: a packaged container2 (produced by package)
:param comparison: the comparison result object for the tree. If provided,
will skip over function to obtain it. | [
"container_sim",
"will",
"return",
"a",
"data",
"structure",
"to",
"render",
"an",
"html",
"tree",
"(",
"graph",
")",
"of",
"the",
"intersection",
"(",
"commonalities",
")",
"between",
"two",
"images",
"or",
"packages",
":",
"param",
"container1",
":",
"the"... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L82-L103 | train |
singularityhub/singularity-python | singularity/views/trees.py | container_tree | def container_tree(container=None,image_package=None):
'''tree will render an html tree (graph) of a container
'''
guts = get_container_contents(container=container,
image_package=image_package,
split_delim="\n")
# Make the tree and return it
tree = make_container_tree(folders = guts["folders.txt"],
files = guts['files.txt'])
return tree | python | def container_tree(container=None,image_package=None):
'''tree will render an html tree (graph) of a container
'''
guts = get_container_contents(container=container,
image_package=image_package,
split_delim="\n")
# Make the tree and return it
tree = make_container_tree(folders = guts["folders.txt"],
files = guts['files.txt'])
return tree | [
"def",
"container_tree",
"(",
"container",
"=",
"None",
",",
"image_package",
"=",
"None",
")",
":",
"guts",
"=",
"get_container_contents",
"(",
"container",
"=",
"container",
",",
"image_package",
"=",
"image_package",
",",
"split_delim",
"=",
"\"\\n\"",
")",
... | tree will render an html tree (graph) of a container | [
"tree",
"will",
"render",
"an",
"html",
"tree",
"(",
"graph",
")",
"of",
"a",
"container"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L106-L117 | train |
singularityhub/singularity-python | singularity/views/trees.py | make_container_tree | def make_container_tree(folders,files,path_delim="/",parse_files=True):
'''make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/'
'''
nodes = {} # first we will make a list of nodes
lookup = {}
count = 1 # count will hold an id for nodes
max_depth = 0
for folder in folders:
if folder != ".":
folder = re.sub("^[.]/","",folder)
path_components = folder.split(path_delim)
for p in range(len(path_components)):
path_component = path_components[p]
fullpath = path_delim.join(path_components[0:p+1])
# Have we created the node yet?
if fullpath not in lookup:
lookup[fullpath] = count
node = {"id":count,"name":path_component,"path":fullpath,"level":p,"children":[]}
count +=1
# Did we find a deeper level?
if p > max_depth:
max_depth = p
# Does the node have a parent?
if p==0: # base node, no parent
parent_id = 0
else: # look up the parent id
parent_path = path_delim.join(path_components[0:p])
parent_id = lookup[parent_path]
node["parent"] = parent_id
nodes[node['id']] = node
# Now make the graph, we simply append children to their parents
seen = []
graph = []
iters = list(range(max_depth+1)) # 0,1,2,3...
iters.reverse() # ...3,2,1,0
iters.pop() # remove 0
for level in iters:
children = {x:y for x,y in nodes.items() if y['level'] == level}
seen = seen + [y['id'] for x,y in children.items()]
nodes = {x:y for x,y in nodes.items() if y['id'] not in seen}
for node_id,child_node in children.items():
if node_id == 0: #base node
graph[node_id] = child_node
else:
parent_id = child_node['parent']
nodes[parent_id]["children"].append(child_node)
# Now add the parents to graph, with name as main lookup
for parent,parent_info in nodes.items():
graph.append(parent_info)
graph = {"name":"base","children":graph}
result = {"graph":graph,"lookup":lookup,"depth":max_depth+1}
# Parse files to include in tree
if parse_files == True:
file_lookup = {}
for filey in files:
filey = re.sub("^[.]/","",filey)
filepath,filename = os.path.split(filey)
if filepath in lookup:
folder_id = lookup[filepath]
if folder_id in file_lookup:
file_lookup[folder_id].append(filename)
else:
file_lookup[folder_id] = [filename]
elif filepath == '': # base folder
if 0 in file_lookup:
file_lookup[0].append(filename)
else:
file_lookup[0] = [filename]
result['files'] = file_lookup
return result | python | def make_container_tree(folders,files,path_delim="/",parse_files=True):
'''make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/'
'''
nodes = {} # first we will make a list of nodes
lookup = {}
count = 1 # count will hold an id for nodes
max_depth = 0
for folder in folders:
if folder != ".":
folder = re.sub("^[.]/","",folder)
path_components = folder.split(path_delim)
for p in range(len(path_components)):
path_component = path_components[p]
fullpath = path_delim.join(path_components[0:p+1])
# Have we created the node yet?
if fullpath not in lookup:
lookup[fullpath] = count
node = {"id":count,"name":path_component,"path":fullpath,"level":p,"children":[]}
count +=1
# Did we find a deeper level?
if p > max_depth:
max_depth = p
# Does the node have a parent?
if p==0: # base node, no parent
parent_id = 0
else: # look up the parent id
parent_path = path_delim.join(path_components[0:p])
parent_id = lookup[parent_path]
node["parent"] = parent_id
nodes[node['id']] = node
# Now make the graph, we simply append children to their parents
seen = []
graph = []
iters = list(range(max_depth+1)) # 0,1,2,3...
iters.reverse() # ...3,2,1,0
iters.pop() # remove 0
for level in iters:
children = {x:y for x,y in nodes.items() if y['level'] == level}
seen = seen + [y['id'] for x,y in children.items()]
nodes = {x:y for x,y in nodes.items() if y['id'] not in seen}
for node_id,child_node in children.items():
if node_id == 0: #base node
graph[node_id] = child_node
else:
parent_id = child_node['parent']
nodes[parent_id]["children"].append(child_node)
# Now add the parents to graph, with name as main lookup
for parent,parent_info in nodes.items():
graph.append(parent_info)
graph = {"name":"base","children":graph}
result = {"graph":graph,"lookup":lookup,"depth":max_depth+1}
# Parse files to include in tree
if parse_files == True:
file_lookup = {}
for filey in files:
filey = re.sub("^[.]/","",filey)
filepath,filename = os.path.split(filey)
if filepath in lookup:
folder_id = lookup[filepath]
if folder_id in file_lookup:
file_lookup[folder_id].append(filename)
else:
file_lookup[folder_id] = [filename]
elif filepath == '': # base folder
if 0 in file_lookup:
file_lookup[0].append(filename)
else:
file_lookup[0] = [filename]
result['files'] = file_lookup
return result | [
"def",
"make_container_tree",
"(",
"folders",
",",
"files",
",",
"path_delim",
"=",
"\"/\"",
",",
"parse_files",
"=",
"True",
")",
":",
"nodes",
"=",
"{",
"}",
"# first we will make a list of nodes",
"lookup",
"=",
"{",
"}",
"count",
"=",
"1",
"# count will ho... | make_container_tree will convert a list of folders and files into a json structure that represents a graph.
:param folders: a list of folders in the image
:param files: a list of files in the folder
:param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
:param path_delim: the path delimiter, default is '/' | [
"make_container_tree",
"will",
"convert",
"a",
"list",
"of",
"folders",
"and",
"files",
"into",
"a",
"json",
"structure",
"that",
"represents",
"a",
"graph",
".",
":",
"param",
"folders",
":",
"a",
"list",
"of",
"folders",
"in",
"the",
"image",
":",
"param... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L120-L197 | train |
singularityhub/singularity-python | singularity/views/trees.py | make_package_tree | def make_package_tree(matrix=None,labels=None,width=25,height=10,title=None,font_size=None):
'''make package tree will make a dendrogram comparing a matrix of packages
:param matrix: a pandas df of packages, with names in index and columns
:param labels: a list of labels corresponding to row names, will be
pulled from rows if not defined
:param title: a title for the plot, if not defined, will be left out.
:returns a plot that can be saved with savefig
'''
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import (
dendrogram,
linkage
)
if font_size is None:
font_size = 8.
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if not isinstance(matrix,pandas.DataFrame):
bot.info("No pandas DataFrame (matrix) of similarities defined, will use default.")
matrix = compare_packages()['files.txt']
title = 'Docker Library Similarity to Base OS'
Z = linkage(matrix, 'ward')
c, coph_dists = cophenet(Z, pdist(matrix))
if labels == None:
labels = matrix.index.tolist()
plt.figure(figsize=(width, height))
if title != None:
plt.title(title)
plt.xlabel('image index')
plt.ylabel('distance')
dendrogram(Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=font_size, # font size for the x axis labels
labels=labels)
return plt | python | def make_package_tree(matrix=None,labels=None,width=25,height=10,title=None,font_size=None):
'''make package tree will make a dendrogram comparing a matrix of packages
:param matrix: a pandas df of packages, with names in index and columns
:param labels: a list of labels corresponding to row names, will be
pulled from rows if not defined
:param title: a title for the plot, if not defined, will be left out.
:returns a plot that can be saved with savefig
'''
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import (
dendrogram,
linkage
)
if font_size is None:
font_size = 8.
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if not isinstance(matrix,pandas.DataFrame):
bot.info("No pandas DataFrame (matrix) of similarities defined, will use default.")
matrix = compare_packages()['files.txt']
title = 'Docker Library Similarity to Base OS'
Z = linkage(matrix, 'ward')
c, coph_dists = cophenet(Z, pdist(matrix))
if labels == None:
labels = matrix.index.tolist()
plt.figure(figsize=(width, height))
if title != None:
plt.title(title)
plt.xlabel('image index')
plt.ylabel('distance')
dendrogram(Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=font_size, # font size for the x axis labels
labels=labels)
return plt | [
"def",
"make_package_tree",
"(",
"matrix",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"width",
"=",
"25",
",",
"height",
"=",
"10",
",",
"title",
"=",
"None",
",",
"font_size",
"=",
"None",
")",
":",
"from",
"matplotlib",
"import",
"pyplot",
"as",
... | make package tree will make a dendrogram comparing a matrix of packages
:param matrix: a pandas df of packages, with names in index and columns
:param labels: a list of labels corresponding to row names, will be
pulled from rows if not defined
:param title: a title for the plot, if not defined, will be left out.
:returns a plot that can be saved with savefig | [
"make",
"package",
"tree",
"will",
"make",
"a",
"dendrogram",
"comparing",
"a",
"matrix",
"of",
"packages",
":",
"param",
"matrix",
":",
"a",
"pandas",
"df",
"of",
"packages",
"with",
"names",
"in",
"index",
"and",
"columns",
":",
"param",
"labels",
":",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L205-L247 | train |
singularityhub/singularity-python | singularity/views/trees.py | make_interactive_tree | def make_interactive_tree(matrix=None,labels=None):
'''make interactive tree will return complete html for an interactive tree
:param title: a title for the plot, if not defined, will be left out.
'''
from scipy.cluster.hierarchy import (
dendrogram,
linkage,
to_tree
)
d3 = None
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if isinstance(matrix,pandas.DataFrame):
Z = linkage(matrix, 'ward') # clusters
T = to_tree(Z, rd=False)
if labels == None:
labels = matrix.index.tolist()
lookup = dict(zip(range(len(labels)), labels))
# Create a dendrogram object without plotting
dend = dendrogram(Z,no_plot=True,
orientation="right",
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=8., # font size for the x axis labels
labels=labels)
d3 = dict(children=[], name="root")
add_node(T, d3)
label_tree(d3["children"][0],lookup)
else:
bot.warning('Please provide data as pandas Data Frame.')
return d3 | python | def make_interactive_tree(matrix=None,labels=None):
'''make interactive tree will return complete html for an interactive tree
:param title: a title for the plot, if not defined, will be left out.
'''
from scipy.cluster.hierarchy import (
dendrogram,
linkage,
to_tree
)
d3 = None
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
if isinstance(matrix,pandas.DataFrame):
Z = linkage(matrix, 'ward') # clusters
T = to_tree(Z, rd=False)
if labels == None:
labels = matrix.index.tolist()
lookup = dict(zip(range(len(labels)), labels))
# Create a dendrogram object without plotting
dend = dendrogram(Z,no_plot=True,
orientation="right",
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=8., # font size for the x axis labels
labels=labels)
d3 = dict(children=[], name="root")
add_node(T, d3)
label_tree(d3["children"][0],lookup)
else:
bot.warning('Please provide data as pandas Data Frame.')
return d3 | [
"def",
"make_interactive_tree",
"(",
"matrix",
"=",
"None",
",",
"labels",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"cluster",
".",
"hierarchy",
"import",
"(",
"dendrogram",
",",
"linkage",
",",
"to_tree",
")",
"d3",
"=",
"None",
"from",
"scipy",
"."... | make interactive tree will return complete html for an interactive tree
:param title: a title for the plot, if not defined, will be left out. | [
"make",
"interactive",
"tree",
"will",
"return",
"complete",
"html",
"for",
"an",
"interactive",
"tree",
":",
"param",
"title",
":",
"a",
"title",
"for",
"the",
"plot",
"if",
"not",
"defined",
"will",
"be",
"left",
"out",
"."
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L250-L284 | train |
singularityhub/singularity-python | singularity/views/trees.py | add_node | def add_node(node, parent):
'''add_node will add a node to it's parent
'''
newNode = dict(node_id=node.id, children=[])
parent["children"].append(newNode)
if node.left: add_node(node.left, newNode)
if node.right: add_node(node.right, newNode) | python | def add_node(node, parent):
'''add_node will add a node to it's parent
'''
newNode = dict(node_id=node.id, children=[])
parent["children"].append(newNode)
if node.left: add_node(node.left, newNode)
if node.right: add_node(node.right, newNode) | [
"def",
"add_node",
"(",
"node",
",",
"parent",
")",
":",
"newNode",
"=",
"dict",
"(",
"node_id",
"=",
"node",
".",
"id",
",",
"children",
"=",
"[",
"]",
")",
"parent",
"[",
"\"children\"",
"]",
".",
"append",
"(",
"newNode",
")",
"if",
"node",
".",... | add_node will add a node to it's parent | [
"add_node",
"will",
"add",
"a",
"node",
"to",
"it",
"s",
"parent"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L287-L293 | train |
singularityhub/singularity-python | singularity/views/trees.py | label_tree | def label_tree(n,lookup):
'''label tree will again recursively label the tree
:param n: the root node, usually d3['children'][0]
:param lookup: the node/id lookup
'''
if len(n["children"]) == 0:
leaves = [lookup[n["node_id"]]]
else:
leaves = reduce(lambda ls, c: ls + label_tree(c,lookup), n["children"], [])
del n["node_id"]
n["name"] = name = "|||".join(sorted(map(str, leaves)))
return leaves | python | def label_tree(n,lookup):
'''label tree will again recursively label the tree
:param n: the root node, usually d3['children'][0]
:param lookup: the node/id lookup
'''
if len(n["children"]) == 0:
leaves = [lookup[n["node_id"]]]
else:
leaves = reduce(lambda ls, c: ls + label_tree(c,lookup), n["children"], [])
del n["node_id"]
n["name"] = name = "|||".join(sorted(map(str, leaves)))
return leaves | [
"def",
"label_tree",
"(",
"n",
",",
"lookup",
")",
":",
"if",
"len",
"(",
"n",
"[",
"\"children\"",
"]",
")",
"==",
"0",
":",
"leaves",
"=",
"[",
"lookup",
"[",
"n",
"[",
"\"node_id\"",
"]",
"]",
"]",
"else",
":",
"leaves",
"=",
"reduce",
"(",
... | label tree will again recursively label the tree
:param n: the root node, usually d3['children'][0]
:param lookup: the node/id lookup | [
"label",
"tree",
"will",
"again",
"recursively",
"label",
"the",
"tree",
":",
"param",
"n",
":",
"the",
"root",
"node",
"usually",
"d3",
"[",
"children",
"]",
"[",
"0",
"]",
":",
"param",
"lookup",
":",
"the",
"node",
"/",
"id",
"lookup"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/views/trees.py#L296-L307 | train |
singularityhub/singularity-python | singularity/analysis/apps.py | extract_apps | def extract_apps(image, app_names):
''' extract app will extract metadata for one or more apps
Parameters
==========
image: the absolute path to the image
app_name: the name of the app under /scif/apps
'''
apps = dict()
if isinstance(app_names, tuple):
app_names = list(app_names)
if not isinstance(app_names, list):
app_names = [app_names]
if len(app_names) == 0:
return apps
for app_name in app_names:
metadata = dict()
# Inspect: labels, env, runscript, tests, help
try:
inspection = json.loads(Client.inspect(image, app=app_name))
del inspection['data']['attributes']['deffile']
metadata['inspect'] = inspection
# If illegal characters prevent load, not much we can do
except:
pass
apps[app_name] = metadata
return apps | python | def extract_apps(image, app_names):
''' extract app will extract metadata for one or more apps
Parameters
==========
image: the absolute path to the image
app_name: the name of the app under /scif/apps
'''
apps = dict()
if isinstance(app_names, tuple):
app_names = list(app_names)
if not isinstance(app_names, list):
app_names = [app_names]
if len(app_names) == 0:
return apps
for app_name in app_names:
metadata = dict()
# Inspect: labels, env, runscript, tests, help
try:
inspection = json.loads(Client.inspect(image, app=app_name))
del inspection['data']['attributes']['deffile']
metadata['inspect'] = inspection
# If illegal characters prevent load, not much we can do
except:
pass
apps[app_name] = metadata
return apps | [
"def",
"extract_apps",
"(",
"image",
",",
"app_names",
")",
":",
"apps",
"=",
"dict",
"(",
")",
"if",
"isinstance",
"(",
"app_names",
",",
"tuple",
")",
":",
"app_names",
"=",
"list",
"(",
"app_names",
")",
"if",
"not",
"isinstance",
"(",
"app_names",
... | extract app will extract metadata for one or more apps
Parameters
==========
image: the absolute path to the image
app_name: the name of the app under /scif/apps | [
"extract",
"app",
"will",
"extract",
"metadata",
"for",
"one",
"or",
"more",
"apps",
"Parameters",
"==========",
"image",
":",
"the",
"absolute",
"path",
"to",
"the",
"image",
"app_name",
":",
"the",
"name",
"of",
"the",
"app",
"under",
"/",
"scif",
"/",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/apps.py#L31-L61 | train |
singularityhub/singularity-python | singularity/utils.py | run_command | def run_command(cmd, sudo=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo
'''
if sudo is True:
cmd = ['sudo'] + cmd
output = Popen(cmd,stderr=STDOUT,stdout=PIPE)
t = output.communicate()[0],output.returncode
output = {'message':t[0],
'return_code':t[1]}
return output | python | def run_command(cmd, sudo=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo
'''
if sudo is True:
cmd = ['sudo'] + cmd
output = Popen(cmd,stderr=STDOUT,stdout=PIPE)
t = output.communicate()[0],output.returncode
output = {'message':t[0],
'return_code':t[1]}
return output | [
"def",
"run_command",
"(",
"cmd",
",",
"sudo",
"=",
"False",
")",
":",
"if",
"sudo",
"is",
"True",
":",
"cmd",
"=",
"[",
"'sudo'",
"]",
"+",
"cmd",
"output",
"=",
"Popen",
"(",
"cmd",
",",
"stderr",
"=",
"STDOUT",
",",
"stdout",
"=",
"PIPE",
")",... | run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
:param sudopw: if specified (not None) command will be run asking for sudo | [
"run_command",
"uses",
"subprocess",
"to",
"send",
"a",
"command",
"to",
"the",
"terminal",
".",
":",
"param",
"cmd",
":",
"the",
"command",
"to",
"send",
"should",
"be",
"a",
"list",
"for",
"subprocess",
":",
"param",
"error_message",
":",
"the",
"error",... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/utils.py#L70-L85 | train |
singularityhub/singularity-python | singularity/utils.py | download_repo | def download_repo(repo_url, destination, commit=None):
'''download_repo
:param repo_url: the url of the repo to clone from
:param destination: the full path to the destination for the repo
'''
command = "git clone %s %s" % (repo_url, destination)
os.system(command)
return destination | python | def download_repo(repo_url, destination, commit=None):
'''download_repo
:param repo_url: the url of the repo to clone from
:param destination: the full path to the destination for the repo
'''
command = "git clone %s %s" % (repo_url, destination)
os.system(command)
return destination | [
"def",
"download_repo",
"(",
"repo_url",
",",
"destination",
",",
"commit",
"=",
"None",
")",
":",
"command",
"=",
"\"git clone %s %s\"",
"%",
"(",
"repo_url",
",",
"destination",
")",
"os",
".",
"system",
"(",
"command",
")",
"return",
"destination"
] | download_repo
:param repo_url: the url of the repo to clone from
:param destination: the full path to the destination for the repo | [
"download_repo",
":",
"param",
"repo_url",
":",
"the",
"url",
"of",
"the",
"repo",
"to",
"clone",
"from",
":",
"param",
"destination",
":",
"the",
"full",
"path",
"to",
"the",
"destination",
"for",
"the",
"repo"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/utils.py#L190-L197 | train |
singularityhub/singularity-python | singularity/analysis/classify.py | get_tags | def get_tags(container=None,
search_folders=None,
file_list=None,
return_unique=True):
'''get tags will return a list of tags that describe the software in an image,
meaning inside of a paricular folder. If search_folder is not defined, uses lib
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param search_folders: specify one or more folders to look for tags
:param file_list: the list of files
:param return_unique: return unique files in folders. Default True.
Default is 'bin'
::notes
The algorithm works as follows:
1) first compare package to set of base OS (provided with shub)
2) subtract the most similar os from image, leaving "custom" files
3) organize custom files into dict based on folder name
4) return search_folders as tags
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if search_folders == None:
search_folders = 'bin'
if not isinstance(search_folders,list):
search_folders = [search_folders]
tags = []
for search_folder in search_folders:
for file_name in file_list:
if search_folder in file_name:
tags.append(file_name)
if return_unique == True:
tags = list(set(tags))
return tags | python | def get_tags(container=None,
search_folders=None,
file_list=None,
return_unique=True):
'''get tags will return a list of tags that describe the software in an image,
meaning inside of a paricular folder. If search_folder is not defined, uses lib
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param search_folders: specify one or more folders to look for tags
:param file_list: the list of files
:param return_unique: return unique files in folders. Default True.
Default is 'bin'
::notes
The algorithm works as follows:
1) first compare package to set of base OS (provided with shub)
2) subtract the most similar os from image, leaving "custom" files
3) organize custom files into dict based on folder name
4) return search_folders as tags
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if search_folders == None:
search_folders = 'bin'
if not isinstance(search_folders,list):
search_folders = [search_folders]
tags = []
for search_folder in search_folders:
for file_name in file_list:
if search_folder in file_name:
tags.append(file_name)
if return_unique == True:
tags = list(set(tags))
return tags | [
"def",
"get_tags",
"(",
"container",
"=",
"None",
",",
"search_folders",
"=",
"None",
",",
"file_list",
"=",
"None",
",",
"return_unique",
"=",
"True",
")",
":",
"if",
"file_list",
"is",
"None",
":",
"file_list",
"=",
"get_container_contents",
"(",
"containe... | get tags will return a list of tags that describe the software in an image,
meaning inside of a paricular folder. If search_folder is not defined, uses lib
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param search_folders: specify one or more folders to look for tags
:param file_list: the list of files
:param return_unique: return unique files in folders. Default True.
Default is 'bin'
::notes
The algorithm works as follows:
1) first compare package to set of base OS (provided with shub)
2) subtract the most similar os from image, leaving "custom" files
3) organize custom files into dict based on folder name
4) return search_folders as tags | [
"get",
"tags",
"will",
"return",
"a",
"list",
"of",
"tags",
"that",
"describe",
"the",
"software",
"in",
"an",
"image",
"meaning",
"inside",
"of",
"a",
"paricular",
"folder",
".",
"If",
"search_folder",
"is",
"not",
"defined",
"uses",
"lib",
":",
"param",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/classify.py#L51-L91 | train |
singularityhub/singularity-python | singularity/analysis/classify.py | file_counts | def file_counts(container=None,
patterns=None,
image_package=None,
file_list=None):
'''file counts will return a list of files that match one or more regular expressions.
if no patterns is defined, a default of readme is used. All patterns and files are made
case insensitive.
Parameters
==========
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param patterns: one or more patterns (str or list) of files to search for.
:param diff: the difference between a container and it's parent OS from get_diff
if not provided, will be generated.
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if patterns == None:
patterns = 'readme'
if not isinstance(patterns,list):
patterns = [patterns]
count = 0
for pattern in patterns:
count += len([x for x in file_list if re.search(pattern.lower(),x.lower())])
bot.info("Total files matching patterns is %s" %count)
return count | python | def file_counts(container=None,
patterns=None,
image_package=None,
file_list=None):
'''file counts will return a list of files that match one or more regular expressions.
if no patterns is defined, a default of readme is used. All patterns and files are made
case insensitive.
Parameters
==========
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param patterns: one or more patterns (str or list) of files to search for.
:param diff: the difference between a container and it's parent OS from get_diff
if not provided, will be generated.
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
if patterns == None:
patterns = 'readme'
if not isinstance(patterns,list):
patterns = [patterns]
count = 0
for pattern in patterns:
count += len([x for x in file_list if re.search(pattern.lower(),x.lower())])
bot.info("Total files matching patterns is %s" %count)
return count | [
"def",
"file_counts",
"(",
"container",
"=",
"None",
",",
"patterns",
"=",
"None",
",",
"image_package",
"=",
"None",
",",
"file_list",
"=",
"None",
")",
":",
"if",
"file_list",
"is",
"None",
":",
"file_list",
"=",
"get_container_contents",
"(",
"container",... | file counts will return a list of files that match one or more regular expressions.
if no patterns is defined, a default of readme is used. All patterns and files are made
case insensitive.
Parameters
==========
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param patterns: one or more patterns (str or list) of files to search for.
:param diff: the difference between a container and it's parent OS from get_diff
if not provided, will be generated. | [
"file",
"counts",
"will",
"return",
"a",
"list",
"of",
"files",
"that",
"match",
"one",
"or",
"more",
"regular",
"expressions",
".",
"if",
"no",
"patterns",
"is",
"defined",
"a",
"default",
"of",
"readme",
"is",
"used",
".",
"All",
"patterns",
"and",
"fi... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/classify.py#L99-L130 | train |
singularityhub/singularity-python | singularity/analysis/classify.py | extension_counts | def extension_counts(container=None, file_list=None, return_counts=True):
'''extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
extensions = dict()
for item in file_list:
filename,ext = os.path.splitext(item)
if ext == '':
if return_counts == False:
extensions = update_dict(extensions,'no-extension',item)
else:
extensions = update_dict_sum(extensions,'no-extension')
else:
if return_counts == False:
extensions = update_dict(extensions,ext,item)
else:
extensions = update_dict_sum(extensions,ext)
return extensions | python | def extension_counts(container=None, file_list=None, return_counts=True):
'''extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True
'''
if file_list is None:
file_list = get_container_contents(container, split_delim='\n')['all']
extensions = dict()
for item in file_list:
filename,ext = os.path.splitext(item)
if ext == '':
if return_counts == False:
extensions = update_dict(extensions,'no-extension',item)
else:
extensions = update_dict_sum(extensions,'no-extension')
else:
if return_counts == False:
extensions = update_dict(extensions,ext,item)
else:
extensions = update_dict_sum(extensions,ext)
return extensions | [
"def",
"extension_counts",
"(",
"container",
"=",
"None",
",",
"file_list",
"=",
"None",
",",
"return_counts",
"=",
"True",
")",
":",
"if",
"file_list",
"is",
"None",
":",
"file_list",
"=",
"get_container_contents",
"(",
"container",
",",
"split_delim",
"=",
... | extension counts will return a dictionary with counts of file extensions for
an image.
:param container: if provided, will use container as image. Can also provide
:param image_package: if provided, can be used instead of container
:param file_list: the complete list of files
:param return_counts: return counts over dict with files. Default True | [
"extension",
"counts",
"will",
"return",
"a",
"dictionary",
"with",
"counts",
"of",
"file",
"extensions",
"for",
"an",
"image",
".",
":",
"param",
"container",
":",
"if",
"provided",
"will",
"use",
"container",
"as",
"image",
".",
"Can",
"also",
"provide",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/classify.py#L133-L158 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/metrics.py | assess_differences | def assess_differences(image_file1,
image_file2,
levels=None,
version=None,
size_heuristic=False,
guts1=None,
guts2=None):
'''assess_differences will compare two images on each level of
reproducibility, returning for each level a dictionary with files
that are the same, different, and an overall score.
:param size_heuristic: if True, assess root owned files based on size
:param guts1,guts2: the result (dict with sizes,roots,etc) from get_content_hashes
'''
if levels is None:
levels = get_levels(version=version)
reports = dict()
scores = dict()
for level_name, level_filter in levels.items():
contenders = []
different = []
setdiff = []
same = 0
# Compare the dictionary of file:hash between two images, and get root owned lookup
if guts1 is None:
guts1 = get_content_hashes(image_path=image_file1,
level_filter=level_filter)
# tag_root=True
# include_sizes=True
if guts2 is None:
guts2 = get_content_hashes(image_path=image_file2,
level_filter=level_filter)
print(level_name)
files = list(set(list(guts1['hashes'].keys()) + list(guts2['hashes'].keys())))
for file_name in files:
# If it's not in one or the other
if file_name not in guts1['hashes'] or file_name not in guts2['hashes']:
setdiff.append(file_name)
else:
if guts1['hashes'][file_name] == guts2['hashes'][file_name]:
same+=1
else:
# If the file is root owned, we compare based on size
if size_heuristic == True:
if guts1['root_owned'][file_name] or guts2['root_owned'][file_name]:
if guts1['sizes'][file_name] == guts2['sizes'][file_name]:
same+=1
else:
different.append(file_name)
else:
# Otherwise, we can assess the bytes content by reading it
contenders.append(file_name)
else:
contenders.append(file_name)
# If the user wants identical (meaning extraction order and timestamps)
if level_name == "IDENTICAL":
different = different + contenders
# Otherwise we need to check based on byte content
else:
if len(contenders) > 0:
for rogue in contenders:
hashy1 = extract_content(image_file1, rogue, return_hash=True)
hashy2 = extract_content(image_file2, rogue, return_hash=True)
# If we can't compare, we use size as a heuristic
if hashy1 is None or hashy2 is None: # if one is symlink, could be None
different.append(file_name)
elif len(hashy1) == 0 or len(hashy2) == 0:
if guts1['sizes'][file_name] == guts2['sizes'][file_name]:
same+=1
else:
different.append(file_name)
elif hashy1 != hashy2:
different.append(rogue)
else:
same+=1
# We use a similar Jacaard coefficient, twice the shared information in the numerator
# (the intersection, same), as a proportion of the total summed files
union = len(guts1['hashes']) + len(guts2['hashes'])
report = {'difference': setdiff,
'intersect_different': different,
'same':same,
'union': union}
if union == 0:
scores[level_name] = 0
else:
scores[level_name] = 2*(same) / union
reports[level_name] = report
reports['scores'] = scores
return reports | python | def assess_differences(image_file1,
image_file2,
levels=None,
version=None,
size_heuristic=False,
guts1=None,
guts2=None):
'''assess_differences will compare two images on each level of
reproducibility, returning for each level a dictionary with files
that are the same, different, and an overall score.
:param size_heuristic: if True, assess root owned files based on size
:param guts1,guts2: the result (dict with sizes,roots,etc) from get_content_hashes
'''
if levels is None:
levels = get_levels(version=version)
reports = dict()
scores = dict()
for level_name, level_filter in levels.items():
contenders = []
different = []
setdiff = []
same = 0
# Compare the dictionary of file:hash between two images, and get root owned lookup
if guts1 is None:
guts1 = get_content_hashes(image_path=image_file1,
level_filter=level_filter)
# tag_root=True
# include_sizes=True
if guts2 is None:
guts2 = get_content_hashes(image_path=image_file2,
level_filter=level_filter)
print(level_name)
files = list(set(list(guts1['hashes'].keys()) + list(guts2['hashes'].keys())))
for file_name in files:
# If it's not in one or the other
if file_name not in guts1['hashes'] or file_name not in guts2['hashes']:
setdiff.append(file_name)
else:
if guts1['hashes'][file_name] == guts2['hashes'][file_name]:
same+=1
else:
# If the file is root owned, we compare based on size
if size_heuristic == True:
if guts1['root_owned'][file_name] or guts2['root_owned'][file_name]:
if guts1['sizes'][file_name] == guts2['sizes'][file_name]:
same+=1
else:
different.append(file_name)
else:
# Otherwise, we can assess the bytes content by reading it
contenders.append(file_name)
else:
contenders.append(file_name)
# If the user wants identical (meaning extraction order and timestamps)
if level_name == "IDENTICAL":
different = different + contenders
# Otherwise we need to check based on byte content
else:
if len(contenders) > 0:
for rogue in contenders:
hashy1 = extract_content(image_file1, rogue, return_hash=True)
hashy2 = extract_content(image_file2, rogue, return_hash=True)
# If we can't compare, we use size as a heuristic
if hashy1 is None or hashy2 is None: # if one is symlink, could be None
different.append(file_name)
elif len(hashy1) == 0 or len(hashy2) == 0:
if guts1['sizes'][file_name] == guts2['sizes'][file_name]:
same+=1
else:
different.append(file_name)
elif hashy1 != hashy2:
different.append(rogue)
else:
same+=1
# We use a similar Jacaard coefficient, twice the shared information in the numerator
# (the intersection, same), as a proportion of the total summed files
union = len(guts1['hashes']) + len(guts2['hashes'])
report = {'difference': setdiff,
'intersect_different': different,
'same':same,
'union': union}
if union == 0:
scores[level_name] = 0
else:
scores[level_name] = 2*(same) / union
reports[level_name] = report
reports['scores'] = scores
return reports | [
"def",
"assess_differences",
"(",
"image_file1",
",",
"image_file2",
",",
"levels",
"=",
"None",
",",
"version",
"=",
"None",
",",
"size_heuristic",
"=",
"False",
",",
"guts1",
"=",
"None",
",",
"guts2",
"=",
"None",
")",
":",
"if",
"levels",
"is",
"None... | assess_differences will compare two images on each level of
reproducibility, returning for each level a dictionary with files
that are the same, different, and an overall score.
:param size_heuristic: if True, assess root owned files based on size
:param guts1,guts2: the result (dict with sizes,roots,etc) from get_content_hashes | [
"assess_differences",
"will",
"compare",
"two",
"images",
"on",
"each",
"level",
"of",
"reproducibility",
"returning",
"for",
"each",
"level",
"a",
"dictionary",
"with",
"files",
"that",
"are",
"the",
"same",
"different",
"and",
"an",
"overall",
"score",
".",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/metrics.py#L30-L134 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/criteria.py | include_file | def include_file(member,file_filter):
'''include_file will look at a path and determine
if it matches a regular expression from a level
'''
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
# Does the filter skip it explicitly?
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
# Include explicitly?
if "include_files" in file_filter:
if member_path in file_filter['include_files']:
return True
# Regular expression?
if "regexp" in file_filter:
if re.search(file_filter["regexp"],member_path):
return True
return False | python | def include_file(member,file_filter):
'''include_file will look at a path and determine
if it matches a regular expression from a level
'''
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
# Does the filter skip it explicitly?
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
# Include explicitly?
if "include_files" in file_filter:
if member_path in file_filter['include_files']:
return True
# Regular expression?
if "regexp" in file_filter:
if re.search(file_filter["regexp"],member_path):
return True
return False | [
"def",
"include_file",
"(",
"member",
",",
"file_filter",
")",
":",
"member_path",
"=",
"member",
".",
"name",
".",
"replace",
"(",
"'.'",
",",
"''",
",",
"1",
")",
"if",
"len",
"(",
"member_path",
")",
"==",
"0",
":",
"return",
"False",
"# Does the fi... | include_file will look at a path and determine
if it matches a regular expression from a level | [
"include_file",
"will",
"look",
"at",
"a",
"path",
"and",
"determine",
"if",
"it",
"matches",
"a",
"regular",
"expression",
"from",
"a",
"level"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/criteria.py#L27-L50 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/criteria.py | is_root_owned | def is_root_owned(member):
'''assess if a file is root owned, meaning "root" or user/group
id of 0'''
if member.uid == 0 or member.gid == 0:
return True
elif member.uname == 'root' or member.gname == 'root':
return True
return False | python | def is_root_owned(member):
'''assess if a file is root owned, meaning "root" or user/group
id of 0'''
if member.uid == 0 or member.gid == 0:
return True
elif member.uname == 'root' or member.gname == 'root':
return True
return False | [
"def",
"is_root_owned",
"(",
"member",
")",
":",
"if",
"member",
".",
"uid",
"==",
"0",
"or",
"member",
".",
"gid",
"==",
"0",
":",
"return",
"True",
"elif",
"member",
".",
"uname",
"==",
"'root'",
"or",
"member",
".",
"gname",
"==",
"'root'",
":",
... | assess if a file is root owned, meaning "root" or user/group
id of 0 | [
"assess",
"if",
"a",
"file",
"is",
"root",
"owned",
"meaning",
"root",
"or",
"user",
"/",
"group",
"id",
"of",
"0"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/criteria.py#L53-L60 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/criteria.py | assess_content | def assess_content(member,file_filter):
'''Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object.
'''
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
# Does the filter skip it explicitly?
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
if "assess_content" in file_filter:
if member_path in file_filter['assess_content']:
return True
return False | python | def assess_content(member,file_filter):
'''Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object.
'''
member_path = member.name.replace('.','',1)
if len(member_path) == 0:
return False
# Does the filter skip it explicitly?
if "skip_files" in file_filter:
if member_path in file_filter['skip_files']:
return False
if "assess_content" in file_filter:
if member_path in file_filter['assess_content']:
return True
return False | [
"def",
"assess_content",
"(",
"member",
",",
"file_filter",
")",
":",
"member_path",
"=",
"member",
".",
"name",
".",
"replace",
"(",
"'.'",
",",
"''",
",",
"1",
")",
"if",
"len",
"(",
"member_path",
")",
"==",
"0",
":",
"return",
"False",
"# Does the ... | Determine if the filter wants the file to be read for content.
In the case of yes, we would then want to add the content to the
hash and not the file object. | [
"Determine",
"if",
"the",
"filter",
"wants",
"the",
"file",
"to",
"be",
"read",
"for",
"content",
".",
"In",
"the",
"case",
"of",
"yes",
"we",
"would",
"then",
"want",
"to",
"add",
"the",
"content",
"to",
"the",
"hash",
"and",
"not",
"the",
"file",
"... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/criteria.py#L63-L81 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/levels.py | get_custom_level | def get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):
'''get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description
'''
if regexp == None:
regexp = "."
if description is None:
description = "This is a custom filter generated by the user."
custom = {"description":description,
"regexp":regexp}
# Include extra files?
if include_files is not None:
if not isinstance(include_files,set):
include_files = set(include_files)
custom['include_files'] = include_files
# Skip files?
if skip_files is not None:
if not isinstance(skip_files,set):
skip_files = set(skip_files)
custom['skip_files'] = skip_files
return custom | python | def get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):
'''get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description
'''
if regexp == None:
regexp = "."
if description is None:
description = "This is a custom filter generated by the user."
custom = {"description":description,
"regexp":regexp}
# Include extra files?
if include_files is not None:
if not isinstance(include_files,set):
include_files = set(include_files)
custom['include_files'] = include_files
# Skip files?
if skip_files is not None:
if not isinstance(skip_files,set):
skip_files = set(skip_files)
custom['skip_files'] = skip_files
return custom | [
"def",
"get_custom_level",
"(",
"regexp",
"=",
"None",
",",
"description",
"=",
"None",
",",
"skip_files",
"=",
"None",
",",
"include_files",
"=",
"None",
")",
":",
"if",
"regexp",
"==",
"None",
":",
"regexp",
"=",
"\".\"",
"if",
"description",
"is",
"No... | get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description | [
"get_custom_level",
"will",
"generate",
"a",
"custom",
"level",
"for",
"the",
"user",
"based",
"on",
"a",
"regular",
"expression",
".",
"If",
"used",
"outside",
"the",
"context",
"of",
"tarsum",
"the",
"user",
"can",
"generate",
"their",
"own",
"named",
"and... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L34-L61 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/levels.py | get_level | def get_level(level,version=None,include_files=None,skip_files=None):
'''get_level returns a single level, with option to customize files
added and skipped.
'''
levels = get_levels(version=version)
level_names = list(levels.keys())
if level.upper() in level_names:
level = levels[level]
else:
bot.warning("%s is not a valid level. Options are %s" %(level.upper(),
"\n".join(levels)))
return None
# Add additional files to skip or remove, if defined
if skip_files is not None:
level = modify_level(level,'skip_files',skip_files)
if include_files is not None:
level = modify_level(level,'include_files',include_files)
level = make_level_set(level)
return level | python | def get_level(level,version=None,include_files=None,skip_files=None):
'''get_level returns a single level, with option to customize files
added and skipped.
'''
levels = get_levels(version=version)
level_names = list(levels.keys())
if level.upper() in level_names:
level = levels[level]
else:
bot.warning("%s is not a valid level. Options are %s" %(level.upper(),
"\n".join(levels)))
return None
# Add additional files to skip or remove, if defined
if skip_files is not None:
level = modify_level(level,'skip_files',skip_files)
if include_files is not None:
level = modify_level(level,'include_files',include_files)
level = make_level_set(level)
return level | [
"def",
"get_level",
"(",
"level",
",",
"version",
"=",
"None",
",",
"include_files",
"=",
"None",
",",
"skip_files",
"=",
"None",
")",
":",
"levels",
"=",
"get_levels",
"(",
"version",
"=",
"version",
")",
"level_names",
"=",
"list",
"(",
"levels",
".",
... | get_level returns a single level, with option to customize files
added and skipped. | [
"get_level",
"returns",
"a",
"single",
"level",
"with",
"option",
"to",
"customize",
"files",
"added",
"and",
"skipped",
"."
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L64-L86 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/levels.py | modify_level | def modify_level(level,field,values,append=True):
'''modify level is intended to add / modify a content type.
Default content type is list, meaning the entry is appended.
If you set append to False, the content will be overwritten
For any other content type, the entry is overwritten.
'''
field = field.lower()
valid_fields = ['regexp','skip_files','include_files']
if field not in valid_fields:
bot.warning("%s is not a valid field, skipping. Choices are %s" %(field,",".join(valid_fields)))
return level
if append:
if not isinstance(values,list):
values = [values]
if field in level:
level[field] = level[field] + values
else:
level[field] = values
else:
level[field] = values
level = make_level_set(level)
return level | python | def modify_level(level,field,values,append=True):
'''modify level is intended to add / modify a content type.
Default content type is list, meaning the entry is appended.
If you set append to False, the content will be overwritten
For any other content type, the entry is overwritten.
'''
field = field.lower()
valid_fields = ['regexp','skip_files','include_files']
if field not in valid_fields:
bot.warning("%s is not a valid field, skipping. Choices are %s" %(field,",".join(valid_fields)))
return level
if append:
if not isinstance(values,list):
values = [values]
if field in level:
level[field] = level[field] + values
else:
level[field] = values
else:
level[field] = values
level = make_level_set(level)
return level | [
"def",
"modify_level",
"(",
"level",
",",
"field",
",",
"values",
",",
"append",
"=",
"True",
")",
":",
"field",
"=",
"field",
".",
"lower",
"(",
")",
"valid_fields",
"=",
"[",
"'regexp'",
",",
"'skip_files'",
",",
"'include_files'",
"]",
"if",
"field",
... | modify level is intended to add / modify a content type.
Default content type is list, meaning the entry is appended.
If you set append to False, the content will be overwritten
For any other content type, the entry is overwritten. | [
"modify",
"level",
"is",
"intended",
"to",
"add",
"/",
"modify",
"a",
"content",
"type",
".",
"Default",
"content",
"type",
"is",
"list",
"meaning",
"the",
"entry",
"is",
"appended",
".",
"If",
"you",
"set",
"append",
"to",
"False",
"the",
"content",
"wi... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L89-L112 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/levels.py | get_levels | def get_levels(version=None):
'''get_levels returns a dictionary of levels (key) and values (dictionaries with
descriptions and regular expressions for files) for the user.
:param version: the version of singularity to use (default is 2.2)
:param include_files: files to add to the level, only relvant if
'''
valid_versions = ['2.3','2.2']
if version is None:
version = "2.3"
version = str(version)
if version not in valid_versions:
bot.error("Unsupported version %s, valid versions are %s" %(version,
",".join(valid_versions)))
levels_file = os.path.abspath(os.path.join(get_installdir(),
'analysis',
'reproduce',
'data',
'reproduce_levels.json'))
levels = read_json(levels_file)
if version == "2.2":
# Labels not added until 2.3
del levels['LABELS']
levels = make_levels_set(levels)
return levels | python | def get_levels(version=None):
'''get_levels returns a dictionary of levels (key) and values (dictionaries with
descriptions and regular expressions for files) for the user.
:param version: the version of singularity to use (default is 2.2)
:param include_files: files to add to the level, only relvant if
'''
valid_versions = ['2.3','2.2']
if version is None:
version = "2.3"
version = str(version)
if version not in valid_versions:
bot.error("Unsupported version %s, valid versions are %s" %(version,
",".join(valid_versions)))
levels_file = os.path.abspath(os.path.join(get_installdir(),
'analysis',
'reproduce',
'data',
'reproduce_levels.json'))
levels = read_json(levels_file)
if version == "2.2":
# Labels not added until 2.3
del levels['LABELS']
levels = make_levels_set(levels)
return levels | [
"def",
"get_levels",
"(",
"version",
"=",
"None",
")",
":",
"valid_versions",
"=",
"[",
"'2.3'",
",",
"'2.2'",
"]",
"if",
"version",
"is",
"None",
":",
"version",
"=",
"\"2.3\"",
"version",
"=",
"str",
"(",
"version",
")",
"if",
"version",
"not",
"in",... | get_levels returns a dictionary of levels (key) and values (dictionaries with
descriptions and regular expressions for files) for the user.
:param version: the version of singularity to use (default is 2.2)
:param include_files: files to add to the level, only relvant if | [
"get_levels",
"returns",
"a",
"dictionary",
"of",
"levels",
"(",
"key",
")",
"and",
"values",
"(",
"dictionaries",
"with",
"descriptions",
"and",
"regular",
"expressions",
"for",
"files",
")",
"for",
"the",
"user",
".",
":",
"param",
"version",
":",
"the",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L115-L143 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/levels.py | make_levels_set | def make_levels_set(levels):
'''make set efficient will convert all lists of items
in levels to a set to speed up operations'''
for level_key,level_filters in levels.items():
levels[level_key] = make_level_set(level_filters)
return levels | python | def make_levels_set(levels):
'''make set efficient will convert all lists of items
in levels to a set to speed up operations'''
for level_key,level_filters in levels.items():
levels[level_key] = make_level_set(level_filters)
return levels | [
"def",
"make_levels_set",
"(",
"levels",
")",
":",
"for",
"level_key",
",",
"level_filters",
"in",
"levels",
".",
"items",
"(",
")",
":",
"levels",
"[",
"level_key",
"]",
"=",
"make_level_set",
"(",
"level_filters",
")",
"return",
"levels"
] | make set efficient will convert all lists of items
in levels to a set to speed up operations | [
"make",
"set",
"efficient",
"will",
"convert",
"all",
"lists",
"of",
"items",
"in",
"levels",
"to",
"a",
"set",
"to",
"speed",
"up",
"operations"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L146-L151 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/levels.py | make_level_set | def make_level_set(level):
'''make level set will convert one level into
a set'''
new_level = dict()
for key,value in level.items():
if isinstance(value,list):
new_level[key] = set(value)
else:
new_level[key] = value
return new_level | python | def make_level_set(level):
'''make level set will convert one level into
a set'''
new_level = dict()
for key,value in level.items():
if isinstance(value,list):
new_level[key] = set(value)
else:
new_level[key] = value
return new_level | [
"def",
"make_level_set",
"(",
"level",
")",
":",
"new_level",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"level",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"new_level",
"[",
"key",
"]",
"=",... | make level set will convert one level into
a set | [
"make",
"level",
"set",
"will",
"convert",
"one",
"level",
"into",
"a",
"set"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/levels.py#L155-L164 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/utils.py | extract_guts | def extract_guts(image_path,
tar,
file_filter=None,
tag_root=True,
include_sizes=True):
'''extract the file guts from an in memory tarfile. The file is not closed.
This should not be done for large images.
'''
if file_filter is None:
file_filter = get_level('IDENTICAL')
results = dict()
digest = dict()
allfiles = []
if tag_root:
roots = dict()
if include_sizes:
sizes = dict()
for member in tar:
member_name = member.name.replace('.','',1)
allfiles.append(member_name)
included = False
if member.isdir() or member.issym():
continue
elif assess_content(member,file_filter):
digest[member_name] = extract_content(image_path, member.name, return_hash=True)
included = True
elif include_file(member,file_filter):
hasher = hashlib.md5()
buf = member.tobuf()
hasher.update(buf)
digest[member_name] = hasher.hexdigest()
included = True
if included:
if include_sizes:
sizes[member_name] = member.size
if tag_root:
roots[member_name] = is_root_owned(member)
results['all'] = allfiles
results['hashes'] = digest
if include_sizes:
results['sizes'] = sizes
if tag_root:
results['root_owned'] = roots
return results | python | def extract_guts(image_path,
tar,
file_filter=None,
tag_root=True,
include_sizes=True):
'''extract the file guts from an in memory tarfile. The file is not closed.
This should not be done for large images.
'''
if file_filter is None:
file_filter = get_level('IDENTICAL')
results = dict()
digest = dict()
allfiles = []
if tag_root:
roots = dict()
if include_sizes:
sizes = dict()
for member in tar:
member_name = member.name.replace('.','',1)
allfiles.append(member_name)
included = False
if member.isdir() or member.issym():
continue
elif assess_content(member,file_filter):
digest[member_name] = extract_content(image_path, member.name, return_hash=True)
included = True
elif include_file(member,file_filter):
hasher = hashlib.md5()
buf = member.tobuf()
hasher.update(buf)
digest[member_name] = hasher.hexdigest()
included = True
if included:
if include_sizes:
sizes[member_name] = member.size
if tag_root:
roots[member_name] = is_root_owned(member)
results['all'] = allfiles
results['hashes'] = digest
if include_sizes:
results['sizes'] = sizes
if tag_root:
results['root_owned'] = roots
return results | [
"def",
"extract_guts",
"(",
"image_path",
",",
"tar",
",",
"file_filter",
"=",
"None",
",",
"tag_root",
"=",
"True",
",",
"include_sizes",
"=",
"True",
")",
":",
"if",
"file_filter",
"is",
"None",
":",
"file_filter",
"=",
"get_level",
"(",
"'IDENTICAL'",
"... | extract the file guts from an in memory tarfile. The file is not closed.
This should not be done for large images. | [
"extract",
"the",
"file",
"guts",
"from",
"an",
"in",
"memory",
"tarfile",
".",
"The",
"file",
"is",
"not",
"closed",
".",
"This",
"should",
"not",
"be",
"done",
"for",
"large",
"images",
"."
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/utils.py#L39-L88 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/utils.py | get_memory_tar | def get_memory_tar(image_path):
'''get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar
'''
byte_array = Client.image.export(image_path)
file_object = io.BytesIO(byte_array)
tar = tarfile.open(mode="r|*", fileobj=file_object)
return (file_object,tar) | python | def get_memory_tar(image_path):
'''get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar
'''
byte_array = Client.image.export(image_path)
file_object = io.BytesIO(byte_array)
tar = tarfile.open(mode="r|*", fileobj=file_object)
return (file_object,tar) | [
"def",
"get_memory_tar",
"(",
"image_path",
")",
":",
"byte_array",
"=",
"Client",
".",
"image",
".",
"export",
"(",
"image_path",
")",
"file_object",
"=",
"io",
".",
"BytesIO",
"(",
"byte_array",
")",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"mode",
"="... | get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar | [
"get",
"an",
"in",
"memory",
"tar",
"of",
"an",
"image",
".",
"Use",
"carefully",
"not",
"as",
"reliable",
"as",
"get_image_tar"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/utils.py#L92-L99 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/utils.py | get_image_tar | def get_image_tar(image_path):
'''get an image tar, either written in memory or to
the file system. file_obj will either be the file object,
or the file itself.
'''
bot.debug('Generate file system tar...')
file_obj = Client.image.export(image_path=image_path)
if file_obj is None:
bot.error("Error generating tar, exiting.")
sys.exit(1)
tar = tarfile.open(file_obj)
return file_obj, tar | python | def get_image_tar(image_path):
'''get an image tar, either written in memory or to
the file system. file_obj will either be the file object,
or the file itself.
'''
bot.debug('Generate file system tar...')
file_obj = Client.image.export(image_path=image_path)
if file_obj is None:
bot.error("Error generating tar, exiting.")
sys.exit(1)
tar = tarfile.open(file_obj)
return file_obj, tar | [
"def",
"get_image_tar",
"(",
"image_path",
")",
":",
"bot",
".",
"debug",
"(",
"'Generate file system tar...'",
")",
"file_obj",
"=",
"Client",
".",
"image",
".",
"export",
"(",
"image_path",
"=",
"image_path",
")",
"if",
"file_obj",
"is",
"None",
":",
"bot"... | get an image tar, either written in memory or to
the file system. file_obj will either be the file object,
or the file itself. | [
"get",
"an",
"image",
"tar",
"either",
"written",
"in",
"memory",
"or",
"to",
"the",
"file",
"system",
".",
"file_obj",
"will",
"either",
"be",
"the",
"file",
"object",
"or",
"the",
"file",
"itself",
"."
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/utils.py#L102-L113 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/utils.py | delete_image_tar | def delete_image_tar(file_obj, tar):
'''delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)'''
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
deleted = True
bot.debug('Deleted temporary tar.')
return deleted | python | def delete_image_tar(file_obj, tar):
'''delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)'''
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
deleted = True
bot.debug('Deleted temporary tar.')
return deleted | [
"def",
"delete_image_tar",
"(",
"file_obj",
",",
"tar",
")",
":",
"try",
":",
"file_obj",
".",
"close",
"(",
")",
"except",
":",
"tar",
".",
"close",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_obj",
")",
":",
"os",
".",
"remove",
... | delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk) | [
"delete",
"image",
"tar",
"will",
"close",
"a",
"file",
"object",
"(",
"if",
"extracted",
"into",
"memory",
")",
"or",
"delete",
"from",
"the",
"file",
"system",
"(",
"if",
"saved",
"to",
"disk",
")"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/utils.py#L116-L127 | train |
singularityhub/singularity-python | singularity/analysis/reproduce/utils.py | extract_content | def extract_content(image_path, member_name, return_hash=False):
'''extract_content will extract content from an image using cat.
If hash=True, a hash sum is returned instead
'''
if member_name.startswith('./'):
member_name = member_name.replace('.','',1)
if return_hash:
hashy = hashlib.md5()
try:
content = Client.execute(image_path,'cat %s' %(member_name))
except:
return None
if not isinstance(content,bytes):
content = content.encode('utf-8')
content = bytes(content)
# If permissions don't allow read, return None
if len(content) == 0:
return None
if return_hash:
hashy.update(content)
return hashy.hexdigest()
return content | python | def extract_content(image_path, member_name, return_hash=False):
'''extract_content will extract content from an image using cat.
If hash=True, a hash sum is returned instead
'''
if member_name.startswith('./'):
member_name = member_name.replace('.','',1)
if return_hash:
hashy = hashlib.md5()
try:
content = Client.execute(image_path,'cat %s' %(member_name))
except:
return None
if not isinstance(content,bytes):
content = content.encode('utf-8')
content = bytes(content)
# If permissions don't allow read, return None
if len(content) == 0:
return None
if return_hash:
hashy.update(content)
return hashy.hexdigest()
return content | [
"def",
"extract_content",
"(",
"image_path",
",",
"member_name",
",",
"return_hash",
"=",
"False",
")",
":",
"if",
"member_name",
".",
"startswith",
"(",
"'./'",
")",
":",
"member_name",
"=",
"member_name",
".",
"replace",
"(",
"'.'",
",",
"''",
",",
"1",
... | extract_content will extract content from an image using cat.
If hash=True, a hash sum is returned instead | [
"extract_content",
"will",
"extract",
"content",
"from",
"an",
"image",
"using",
"cat",
".",
"If",
"hash",
"=",
"True",
"a",
"hash",
"sum",
"is",
"returned",
"instead"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/reproduce/utils.py#L130-L154 | train |
singularityhub/singularity-python | singularity/build/main.py | run_build | def run_build(build_dir, params, verbose=True):
'''run_build takes a build directory and params dictionary, and does the following:
- downloads repo to a temporary directory
- changes branch or commit, if needed
- creates and bootstraps singularity image from Singularity file
- returns a dictionary with:
image (path), metadata (dict)
The following must be included in params:
spec_file, repo_url, branch, commit
'''
# Download the repository
download_repo(repo_url=params['repo_url'],
destination=build_dir)
os.chdir(build_dir)
if params['branch'] != None:
bot.info('Checking out branch %s' %params['branch'])
os.system('git checkout %s' %(params['branch']))
else:
params['branch'] = "master"
# Set the debug level
Client.debug = params['debug']
# Commit
if params['commit'] not in [None,'']:
bot.info('Checking out commit %s' %params['commit'])
os.system('git checkout %s .' %(params['commit']))
# From here on out commit is used as a unique id, if we don't have one, we use current
else:
params['commit'] = os.popen('git log -n 1 --pretty=format:"%H"').read()
bot.warning("commit not specified, setting to current %s" %params['commit'])
# Dump some params for the builder, in case it fails after this
passing_params = "/tmp/params.pkl"
pickle.dump(params, open(passing_params,'wb'))
# Now look for spec file
if os.path.exists(params['spec_file']):
bot.info("Found spec file %s in repository" %params['spec_file'])
# If the user has a symbolic link
if os.path.islink(params['spec_file']):
bot.info("%s is a symbolic link." %params['spec_file'])
params['spec_file'] = os.path.realpath(params['spec_file'])
# START TIMING
start_time = datetime.now()
# Secure Build
image = Client.build(recipe=params['spec_file'],
build_folder=build_dir,
isolated=True)
# Save has for metadata (also is image name)
version = get_image_file_hash(image)
params['version'] = version
pickle.dump(params, open(passing_params,'wb'))
# Rename image to be hash
finished_image = "%s/%s.simg" %(os.path.dirname(image), version)
image = shutil.move(image, finished_image)
final_time = (datetime.now() - start_time).seconds
bot.info("Final time of build %s seconds." %final_time)
# Did the container build successfully?
test_result = test_container(image)
if test_result['return_code'] != 0:
bot.error("Image failed to build, cancelling.")
sys.exit(1)
# Get singularity version
singularity_version = Client.version()
Client.debug = False
inspect = Client.inspect(image) # this is a string
Client.debug = params['debug']
# Get information on apps
Client.debug = False
app_names = Client.apps(image)
Client.debug = params['debug']
apps = extract_apps(image, app_names)
metrics = {'build_time_seconds': final_time,
'singularity_version': singularity_version,
'singularity_python_version': singularity_python_version,
'inspect': inspect,
'version': version,
'apps': apps}
output = {'image':image,
'metadata':metrics,
'params':params }
return output
else:
# Tell the user what is actually there
present_files = glob("*")
bot.error("Build file %s not found in repository" %params['spec_file'])
bot.info("Found files are %s" %"\n".join(present_files))
# Params have been exported, will be found by log
sys.exit(1) | python | def run_build(build_dir, params, verbose=True):
'''run_build takes a build directory and params dictionary, and does the following:
- downloads repo to a temporary directory
- changes branch or commit, if needed
- creates and bootstraps singularity image from Singularity file
- returns a dictionary with:
image (path), metadata (dict)
The following must be included in params:
spec_file, repo_url, branch, commit
'''
# Download the repository
download_repo(repo_url=params['repo_url'],
destination=build_dir)
os.chdir(build_dir)
if params['branch'] != None:
bot.info('Checking out branch %s' %params['branch'])
os.system('git checkout %s' %(params['branch']))
else:
params['branch'] = "master"
# Set the debug level
Client.debug = params['debug']
# Commit
if params['commit'] not in [None,'']:
bot.info('Checking out commit %s' %params['commit'])
os.system('git checkout %s .' %(params['commit']))
# From here on out commit is used as a unique id, if we don't have one, we use current
else:
params['commit'] = os.popen('git log -n 1 --pretty=format:"%H"').read()
bot.warning("commit not specified, setting to current %s" %params['commit'])
# Dump some params for the builder, in case it fails after this
passing_params = "/tmp/params.pkl"
pickle.dump(params, open(passing_params,'wb'))
# Now look for spec file
if os.path.exists(params['spec_file']):
bot.info("Found spec file %s in repository" %params['spec_file'])
# If the user has a symbolic link
if os.path.islink(params['spec_file']):
bot.info("%s is a symbolic link." %params['spec_file'])
params['spec_file'] = os.path.realpath(params['spec_file'])
# START TIMING
start_time = datetime.now()
# Secure Build
image = Client.build(recipe=params['spec_file'],
build_folder=build_dir,
isolated=True)
# Save has for metadata (also is image name)
version = get_image_file_hash(image)
params['version'] = version
pickle.dump(params, open(passing_params,'wb'))
# Rename image to be hash
finished_image = "%s/%s.simg" %(os.path.dirname(image), version)
image = shutil.move(image, finished_image)
final_time = (datetime.now() - start_time).seconds
bot.info("Final time of build %s seconds." %final_time)
# Did the container build successfully?
test_result = test_container(image)
if test_result['return_code'] != 0:
bot.error("Image failed to build, cancelling.")
sys.exit(1)
# Get singularity version
singularity_version = Client.version()
Client.debug = False
inspect = Client.inspect(image) # this is a string
Client.debug = params['debug']
# Get information on apps
Client.debug = False
app_names = Client.apps(image)
Client.debug = params['debug']
apps = extract_apps(image, app_names)
metrics = {'build_time_seconds': final_time,
'singularity_version': singularity_version,
'singularity_python_version': singularity_python_version,
'inspect': inspect,
'version': version,
'apps': apps}
output = {'image':image,
'metadata':metrics,
'params':params }
return output
else:
# Tell the user what is actually there
present_files = glob("*")
bot.error("Build file %s not found in repository" %params['spec_file'])
bot.info("Found files are %s" %"\n".join(present_files))
# Params have been exported, will be found by log
sys.exit(1) | [
"def",
"run_build",
"(",
"build_dir",
",",
"params",
",",
"verbose",
"=",
"True",
")",
":",
"# Download the repository",
"download_repo",
"(",
"repo_url",
"=",
"params",
"[",
"'repo_url'",
"]",
",",
"destination",
"=",
"build_dir",
")",
"os",
".",
"chdir",
"... | run_build takes a build directory and params dictionary, and does the following:
- downloads repo to a temporary directory
- changes branch or commit, if needed
- creates and bootstraps singularity image from Singularity file
- returns a dictionary with:
image (path), metadata (dict)
The following must be included in params:
spec_file, repo_url, branch, commit | [
"run_build",
"takes",
"a",
"build",
"directory",
"and",
"params",
"dictionary",
"and",
"does",
"the",
"following",
":",
"-",
"downloads",
"repo",
"to",
"a",
"temporary",
"directory",
"-",
"changes",
"branch",
"or",
"commit",
"if",
"needed",
"-",
"creates",
"... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/main.py#L53-L166 | train |
singularityhub/singularity-python | singularity/build/main.py | send_build_data | def send_build_data(build_dir, data, secret,
response_url=None,clean_up=True):
'''finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory
'''
# Send with Authentication header
body = '%s|%s|%s|%s|%s' %(data['container_id'],
data['commit'],
data['branch'],
data['token'],
data['tag'])
signature = generate_header_signature(secret=secret,
payload=body,
request_type="push")
headers = {'Authorization': signature }
if response_url is not None:
finish = requests.post(response_url,data=data, headers=headers)
bot.debug("RECEIVE POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
else:
bot.warning("response_url set to None, skipping sending of build.")
if clean_up == True:
shutil.rmtree(build_dir)
# Delay a bit, to give buffer between bringing instance down
time.sleep(20) | python | def send_build_data(build_dir, data, secret,
response_url=None,clean_up=True):
'''finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory
'''
# Send with Authentication header
body = '%s|%s|%s|%s|%s' %(data['container_id'],
data['commit'],
data['branch'],
data['token'],
data['tag'])
signature = generate_header_signature(secret=secret,
payload=body,
request_type="push")
headers = {'Authorization': signature }
if response_url is not None:
finish = requests.post(response_url,data=data, headers=headers)
bot.debug("RECEIVE POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
else:
bot.warning("response_url set to None, skipping sending of build.")
if clean_up == True:
shutil.rmtree(build_dir)
# Delay a bit, to give buffer between bringing instance down
time.sleep(20) | [
"def",
"send_build_data",
"(",
"build_dir",
",",
"data",
",",
"secret",
",",
"response_url",
"=",
"None",
",",
"clean_up",
"=",
"True",
")",
":",
"# Send with Authentication header",
"body",
"=",
"'%s|%s|%s|%s|%s'",
"%",
"(",
"data",
"[",
"'container_id'",
"]",
... | finish build sends the build and data (response) to a response url
:param build_dir: the directory of the build
:response_url: where to send the response. If None, won't send
:param data: the data object to send as a post
:param clean_up: If true (default) removes build directory | [
"finish",
"build",
"sends",
"the",
"build",
"and",
"data",
"(",
"response",
")",
"to",
"a",
"response",
"url",
":",
"param",
"build_dir",
":",
"the",
"directory",
"of",
"the",
"build",
":",
"response_url",
":",
"where",
"to",
"send",
"the",
"response",
"... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/main.py#L170-L203 | train |
singularityhub/singularity-python | singularity/build/main.py | send_build_close | def send_build_close(params,response_url):
'''send build close sends a final response (post) to the server to bring down
the instance. The following must be included in params:
repo_url, logfile, repo_id, secret, log_file, token
'''
# Finally, package everything to send back to shub
response = {"log": json.dumps(params['log_file']),
"repo_url": params['repo_url'],
"logfile": params['logfile'],
"repo_id": params['repo_id'],
"container_id": params['container_id']}
body = '%s|%s|%s|%s|%s' %(params['container_id'],
params['commit'],
params['branch'],
params['token'],
params['tag'])
signature = generate_header_signature(secret=params['token'],
payload=body,
request_type="finish")
headers = {'Authorization': signature }
finish = requests.post(response_url,data=response, headers=headers)
bot.debug("FINISH POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
return finish | python | def send_build_close(params,response_url):
'''send build close sends a final response (post) to the server to bring down
the instance. The following must be included in params:
repo_url, logfile, repo_id, secret, log_file, token
'''
# Finally, package everything to send back to shub
response = {"log": json.dumps(params['log_file']),
"repo_url": params['repo_url'],
"logfile": params['logfile'],
"repo_id": params['repo_id'],
"container_id": params['container_id']}
body = '%s|%s|%s|%s|%s' %(params['container_id'],
params['commit'],
params['branch'],
params['token'],
params['tag'])
signature = generate_header_signature(secret=params['token'],
payload=body,
request_type="finish")
headers = {'Authorization': signature }
finish = requests.post(response_url,data=response, headers=headers)
bot.debug("FINISH POST TO SINGULARITY HUB ---------------------")
bot.debug(finish.status_code)
bot.debug(finish.reason)
return finish | [
"def",
"send_build_close",
"(",
"params",
",",
"response_url",
")",
":",
"# Finally, package everything to send back to shub",
"response",
"=",
"{",
"\"log\"",
":",
"json",
".",
"dumps",
"(",
"params",
"[",
"'log_file'",
"]",
")",
",",
"\"repo_url\"",
":",
"params... | send build close sends a final response (post) to the server to bring down
the instance. The following must be included in params:
repo_url, logfile, repo_id, secret, log_file, token | [
"send",
"build",
"close",
"sends",
"a",
"final",
"response",
"(",
"post",
")",
"to",
"the",
"server",
"to",
"bring",
"down",
"the",
"instance",
".",
"The",
"following",
"must",
"be",
"included",
"in",
"params",
":"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/main.py#L208-L237 | train |
singularityhub/singularity-python | singularity/analysis/utils.py | remove_unicode_dict | def remove_unicode_dict(input_dict):
'''remove unicode keys and values from dict, encoding in utf8
'''
if isinstance(input_dict, collections.Mapping):
return dict(map(remove_unicode_dict, input_dict.iteritems()))
elif isinstance(input_dict, collections.Iterable):
return type(input_dict)(map(remove_unicode_dict, input_dict))
else:
return input_dict | python | def remove_unicode_dict(input_dict):
'''remove unicode keys and values from dict, encoding in utf8
'''
if isinstance(input_dict, collections.Mapping):
return dict(map(remove_unicode_dict, input_dict.iteritems()))
elif isinstance(input_dict, collections.Iterable):
return type(input_dict)(map(remove_unicode_dict, input_dict))
else:
return input_dict | [
"def",
"remove_unicode_dict",
"(",
"input_dict",
")",
":",
"if",
"isinstance",
"(",
"input_dict",
",",
"collections",
".",
"Mapping",
")",
":",
"return",
"dict",
"(",
"map",
"(",
"remove_unicode_dict",
",",
"input_dict",
".",
"iteritems",
"(",
")",
")",
")",... | remove unicode keys and values from dict, encoding in utf8 | [
"remove",
"unicode",
"keys",
"and",
"values",
"from",
"dict",
"encoding",
"in",
"utf8"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/utils.py#L39-L47 | train |
singularityhub/singularity-python | singularity/analysis/utils.py | update_dict | def update_dict(input_dict,key,value):
'''update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
'''
if key in input_dict:
input_dict[key].append(value)
else:
input_dict[key] = [value]
return input_dict | python | def update_dict(input_dict,key,value):
'''update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with
'''
if key in input_dict:
input_dict[key].append(value)
else:
input_dict[key] = [value]
return input_dict | [
"def",
"update_dict",
"(",
"input_dict",
",",
"key",
",",
"value",
")",
":",
"if",
"key",
"in",
"input_dict",
":",
"input_dict",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"input_dict",
"[",
"key",
"]",
"=",
"[",
"value",
"]",
... | update_dict will update lists in a dictionary. If the key is not included,
if will add as new list. If it is, it will append.
:param input_dict: the dict to update
:param value: the value to update with | [
"update_dict",
"will",
"update",
"lists",
"in",
"a",
"dictionary",
".",
"If",
"the",
"key",
"is",
"not",
"included",
"if",
"will",
"add",
"as",
"new",
"list",
".",
"If",
"it",
"is",
"it",
"will",
"append",
".",
":",
"param",
"input_dict",
":",
"the",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/utils.py#L50-L60 | train |
singularityhub/singularity-python | singularity/analysis/utils.py | update_dict_sum | def update_dict_sum(input_dict,key,increment=None,initial_value=None):
'''update_dict sum will increment a dictionary key
by an increment, and add a value of 0 if it doesn't exist
:param input_dict: the dict to update
:param increment: the value to increment by. Default is 1
:param initial_value: value to start with. Default is 0
'''
if increment == None:
increment = 1
if initial_value == None:
initial_value = 0
if key in input_dict:
input_dict[key] += increment
else:
input_dict[key] = initial_value + increment
return input_dict | python | def update_dict_sum(input_dict,key,increment=None,initial_value=None):
'''update_dict sum will increment a dictionary key
by an increment, and add a value of 0 if it doesn't exist
:param input_dict: the dict to update
:param increment: the value to increment by. Default is 1
:param initial_value: value to start with. Default is 0
'''
if increment == None:
increment = 1
if initial_value == None:
initial_value = 0
if key in input_dict:
input_dict[key] += increment
else:
input_dict[key] = initial_value + increment
return input_dict | [
"def",
"update_dict_sum",
"(",
"input_dict",
",",
"key",
",",
"increment",
"=",
"None",
",",
"initial_value",
"=",
"None",
")",
":",
"if",
"increment",
"==",
"None",
":",
"increment",
"=",
"1",
"if",
"initial_value",
"==",
"None",
":",
"initial_value",
"="... | update_dict sum will increment a dictionary key
by an increment, and add a value of 0 if it doesn't exist
:param input_dict: the dict to update
:param increment: the value to increment by. Default is 1
:param initial_value: value to start with. Default is 0 | [
"update_dict",
"sum",
"will",
"increment",
"a",
"dictionary",
"key",
"by",
"an",
"increment",
"and",
"add",
"a",
"value",
"of",
"0",
"if",
"it",
"doesn",
"t",
"exist",
":",
"param",
"input_dict",
":",
"the",
"dict",
"to",
"update",
":",
"param",
"increme... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/utils.py#L63-L80 | train |
singularityhub/singularity-python | singularity/analysis/metrics.py | information_coefficient | def information_coefficient(total1,total2,intersect):
'''a simple jacaard (information coefficient) to compare two lists of overlaps/diffs
'''
total = total1 + total2
return 2.0*len(intersect) / total | python | def information_coefficient(total1,total2,intersect):
'''a simple jacaard (information coefficient) to compare two lists of overlaps/diffs
'''
total = total1 + total2
return 2.0*len(intersect) / total | [
"def",
"information_coefficient",
"(",
"total1",
",",
"total2",
",",
"intersect",
")",
":",
"total",
"=",
"total1",
"+",
"total2",
"return",
"2.0",
"*",
"len",
"(",
"intersect",
")",
"/",
"total"
] | a simple jacaard (information coefficient) to compare two lists of overlaps/diffs | [
"a",
"simple",
"jacaard",
"(",
"information",
"coefficient",
")",
"to",
"compare",
"two",
"lists",
"of",
"overlaps",
"/",
"diffs"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/metrics.py#L34-L38 | train |
singularityhub/singularity-python | singularity/analysis/metrics.py | RSA | def RSA(m1,m2):
'''RSA analysis will compare the similarity of two matrices
'''
from scipy.stats import pearsonr
import scipy.linalg
import numpy
# This will take the diagonal of each matrix (and the other half is changed to nan) and flatten to vector
vectorm1 = m1.mask(numpy.triu(numpy.ones(m1.shape)).astype(numpy.bool)).values.flatten()
vectorm2 = m2.mask(numpy.triu(numpy.ones(m2.shape)).astype(numpy.bool)).values.flatten()
# Now remove the nans
m1defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm1,dtype=float)))
m2defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm2,dtype=float)))
idx = numpy.intersect1d(m1defined,m2defined)
return pearsonr(vectorm1[idx],vectorm2[idx])[0] | python | def RSA(m1,m2):
'''RSA analysis will compare the similarity of two matrices
'''
from scipy.stats import pearsonr
import scipy.linalg
import numpy
# This will take the diagonal of each matrix (and the other half is changed to nan) and flatten to vector
vectorm1 = m1.mask(numpy.triu(numpy.ones(m1.shape)).astype(numpy.bool)).values.flatten()
vectorm2 = m2.mask(numpy.triu(numpy.ones(m2.shape)).astype(numpy.bool)).values.flatten()
# Now remove the nans
m1defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm1,dtype=float)))
m2defined = numpy.argwhere(~numpy.isnan(numpy.array(vectorm2,dtype=float)))
idx = numpy.intersect1d(m1defined,m2defined)
return pearsonr(vectorm1[idx],vectorm2[idx])[0] | [
"def",
"RSA",
"(",
"m1",
",",
"m2",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"pearsonr",
"import",
"scipy",
".",
"linalg",
"import",
"numpy",
"# This will take the diagonal of each matrix (and the other half is changed to nan) and flatten to vector",
"vectorm1",
... | RSA analysis will compare the similarity of two matrices | [
"RSA",
"analysis",
"will",
"compare",
"the",
"similarity",
"of",
"two",
"matrices"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/metrics.py#L42-L56 | train |
singularityhub/singularity-python | singularity/build/google.py | get_google_service | def get_google_service(service_type=None,version=None):
'''
get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1)
'''
if service_type == None:
service_type = "storage"
if version == None:
version = "v1"
credentials = GoogleCredentials.get_application_default()
return build(service_type, version, credentials=credentials) | python | def get_google_service(service_type=None,version=None):
'''
get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1)
'''
if service_type == None:
service_type = "storage"
if version == None:
version = "v1"
credentials = GoogleCredentials.get_application_default()
return build(service_type, version, credentials=credentials) | [
"def",
"get_google_service",
"(",
"service_type",
"=",
"None",
",",
"version",
"=",
"None",
")",
":",
"if",
"service_type",
"==",
"None",
":",
"service_type",
"=",
"\"storage\"",
"if",
"version",
"==",
"None",
":",
"version",
"=",
"\"v1\"",
"credentials",
"=... | get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1) | [
"get_url",
"will",
"use",
"the",
"requests",
"library",
"to",
"get",
"a",
"url",
":",
"param",
"service_type",
":",
"the",
"service",
"to",
"get",
"(",
"default",
"is",
"storage",
")",
":",
"param",
"version",
":",
"version",
"to",
"use",
"(",
"default",... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L63-L75 | train |
singularityhub/singularity-python | singularity/build/google.py | upload_file | def upload_file(storage_service,bucket,bucket_path,file_name,verbose=True):
'''get_folder will return the folder with folder_name, and if create=True,
will create it if not found. If folder is found or created, the metadata is
returned, otherwise None is returned
:param storage_service: the drive_service created from get_storage_service
:param bucket: the bucket object from get_bucket
:param file_name: the name of the file to upload
:param bucket_path: the path to upload to
'''
# Set up path on bucket
upload_path = "%s/%s" %(bucket['id'],bucket_path)
if upload_path[-1] != '/':
upload_path = "%s/" %(upload_path)
upload_path = "%s%s" %(upload_path,os.path.basename(file_name))
body = {'name': upload_path }
# Create media object with correct mimetype
if os.path.exists(file_name):
mimetype = sniff_extension(file_name,verbose=verbose)
media = http.MediaFileUpload(file_name,
mimetype=mimetype,
resumable=True)
request = storage_service.objects().insert(bucket=bucket['id'],
body=body,
predefinedAcl="publicRead",
media_body=media)
result = request.execute()
return result
bot.warning('%s requested for upload does not exist, skipping' %file_name) | python | def upload_file(storage_service,bucket,bucket_path,file_name,verbose=True):
'''get_folder will return the folder with folder_name, and if create=True,
will create it if not found. If folder is found or created, the metadata is
returned, otherwise None is returned
:param storage_service: the drive_service created from get_storage_service
:param bucket: the bucket object from get_bucket
:param file_name: the name of the file to upload
:param bucket_path: the path to upload to
'''
# Set up path on bucket
upload_path = "%s/%s" %(bucket['id'],bucket_path)
if upload_path[-1] != '/':
upload_path = "%s/" %(upload_path)
upload_path = "%s%s" %(upload_path,os.path.basename(file_name))
body = {'name': upload_path }
# Create media object with correct mimetype
if os.path.exists(file_name):
mimetype = sniff_extension(file_name,verbose=verbose)
media = http.MediaFileUpload(file_name,
mimetype=mimetype,
resumable=True)
request = storage_service.objects().insert(bucket=bucket['id'],
body=body,
predefinedAcl="publicRead",
media_body=media)
result = request.execute()
return result
bot.warning('%s requested for upload does not exist, skipping' %file_name) | [
"def",
"upload_file",
"(",
"storage_service",
",",
"bucket",
",",
"bucket_path",
",",
"file_name",
",",
"verbose",
"=",
"True",
")",
":",
"# Set up path on bucket",
"upload_path",
"=",
"\"%s/%s\"",
"%",
"(",
"bucket",
"[",
"'id'",
"]",
",",
"bucket_path",
")",... | get_folder will return the folder with folder_name, and if create=True,
will create it if not found. If folder is found or created, the metadata is
returned, otherwise None is returned
:param storage_service: the drive_service created from get_storage_service
:param bucket: the bucket object from get_bucket
:param file_name: the name of the file to upload
:param bucket_path: the path to upload to | [
"get_folder",
"will",
"return",
"the",
"folder",
"with",
"folder_name",
"and",
"if",
"create",
"=",
"True",
"will",
"create",
"it",
"if",
"not",
"found",
".",
"If",
"folder",
"is",
"found",
"or",
"created",
"the",
"metadata",
"is",
"returned",
"otherwise",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L105-L132 | train |
singularityhub/singularity-python | singularity/build/google.py | get_image_path | def get_image_path(repo_url, trailing_path):
'''get_image_path will determine an image path based on a repo url, removing
any token, and taking into account urls that end with .git.
:param repo_url: the repo url to parse:
:param trailing_path: the trailing path (commit then hash is common)
'''
repo_url = repo_url.split('@')[-1].strip()
if repo_url.endswith('.git'):
repo_url = repo_url[:-4]
return "%s/%s" %(re.sub('^http.+//www[.]','',repo_url), trailing_path) | python | def get_image_path(repo_url, trailing_path):
'''get_image_path will determine an image path based on a repo url, removing
any token, and taking into account urls that end with .git.
:param repo_url: the repo url to parse:
:param trailing_path: the trailing path (commit then hash is common)
'''
repo_url = repo_url.split('@')[-1].strip()
if repo_url.endswith('.git'):
repo_url = repo_url[:-4]
return "%s/%s" %(re.sub('^http.+//www[.]','',repo_url), trailing_path) | [
"def",
"get_image_path",
"(",
"repo_url",
",",
"trailing_path",
")",
":",
"repo_url",
"=",
"repo_url",
".",
"split",
"(",
"'@'",
")",
"[",
"-",
"1",
"]",
".",
"strip",
"(",
")",
"if",
"repo_url",
".",
"endswith",
"(",
"'.git'",
")",
":",
"repo_url",
... | get_image_path will determine an image path based on a repo url, removing
any token, and taking into account urls that end with .git.
:param repo_url: the repo url to parse:
:param trailing_path: the trailing path (commit then hash is common) | [
"get_image_path",
"will",
"determine",
"an",
"image",
"path",
"based",
"on",
"a",
"repo",
"url",
"removing",
"any",
"token",
"and",
"taking",
"into",
"account",
"urls",
"that",
"end",
"with",
".",
"git",
".",
":",
"param",
"repo_url",
":",
"the",
"repo",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L147-L156 | train |
singularityhub/singularity-python | singularity/build/google.py | run_build | def run_build(logfile='/tmp/.shub-log'):
'''run_build will generate the Singularity build from a spec_file from a repo_url.
If no arguments are required, the metadata api is queried for the values.
:param build_dir: directory to do the build in. If not specified, will use temporary.
:param spec_file: the spec_file name to use, assumed to be in git repo
:param repo_url: the url to download the repo from
:param repo_id: the repo_id to uniquely identify the repo (in case name changes)
:param commit: the commit to checkout. If none provided, will use most recent.
:param bucket_name: the name of the bucket to send files to
:param verbose: print out extra details as we go (default True)
:param token: a token to send back to the server to authenticate the collection
:param secret: a secret to match to the correct container
:param response_url: the build url to send the response back to. Should also come
from metadata. If not specified, no response is sent
:param branch: the branch to checkout for the build.
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
if go == None:
sys.exit(0)
# If the user wants debug, this will be set
debug = True
enable_debug = get_build_metadata(key='debug')
if enable_debug == None:
debug = False
bot.info('DEBUG %s' %debug)
# Uaw /tmp for build directory
build_dir = tempfile.mkdtemp()
# Get variables from the instance metadata API
metadata = [{'key': 'repo_url', 'value': None },
{'key': 'repo_id', 'value': None },
{'key': 'response_url', 'value': None },
{'key': 'bucket_name', 'value': None },
{'key': 'tag', 'value': None },
{'key': 'container_id', 'value': None },
{'key': 'commit', 'value': None },
{'key': 'token', 'value': None},
{'key': 'branch', 'value': None },
{'key': 'spec_file', 'value': None},
{'key': 'logging_url', 'value': None },
{'key': 'logfile', 'value': logfile }]
# Obtain values from build
bot.log("BUILD PARAMETERS:")
params = get_build_params(metadata)
params['debug'] = debug
# Default spec file is Singularity
if params['spec_file'] == None:
params['spec_file'] = "Singularity"
if params['bucket_name'] == None:
params['bucket_name'] = "singularityhub"
if params['tag'] == None:
params['tag'] = "latest"
output = run_build_main(build_dir=build_dir,
params=params)
# Output includes:
finished_image = output['image']
metadata = output['metadata']
params = output['params']
# Upload image package files to Google Storage
if os.path.exists(finished_image):
bot.info("%s successfully built" %finished_image)
dest_dir = tempfile.mkdtemp(prefix='build')
# The path to the images on google drive will be the github url/commit folder
trailing_path = "%s/%s" %(params['commit'], params['version'])
image_path = get_image_path(params['repo_url'], trailing_path)
# commits are no longer unique
# storage is by commit
build_files = [finished_image]
bot.info("Sending image to storage:")
bot.info('\n'.join(build_files))
# Start the storage service, retrieve the bucket
storage_service = get_google_service() # default is "storage" "v1"
bucket = get_bucket(storage_service,params["bucket_name"])
# For each file, upload to storage
files = []
for build_file in build_files:
bot.info("Uploading %s to storage..." %build_file)
storage_file = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=build_file)
files.append(storage_file)
# Finally, package everything to send back to shub
response = {"files": json.dumps(files),
"repo_url": params['repo_url'],
"commit": params['commit'],
"repo_id": params['repo_id'],
"branch": params['branch'],
"tag": params['tag'],
"container_id": params['container_id'],
"spec_file":params['spec_file'],
"token": params['token'],
"metadata": json.dumps(metadata)}
# Did the user specify a specific log file?
custom_logfile = get_build_metadata('logfile')
if custom_logfile is not None:
logfile = custom_logfile
response['logfile'] = logfile
# Send final build data to instance
send_build_data(build_dir=build_dir,
response_url=params['response_url'],
secret=params['token'],
data=response)
# Dump final params, for logger to retrieve
passing_params = "/tmp/params.pkl"
pickle.dump(params,open(passing_params,'wb')) | python | def run_build(logfile='/tmp/.shub-log'):
'''run_build will generate the Singularity build from a spec_file from a repo_url.
If no arguments are required, the metadata api is queried for the values.
:param build_dir: directory to do the build in. If not specified, will use temporary.
:param spec_file: the spec_file name to use, assumed to be in git repo
:param repo_url: the url to download the repo from
:param repo_id: the repo_id to uniquely identify the repo (in case name changes)
:param commit: the commit to checkout. If none provided, will use most recent.
:param bucket_name: the name of the bucket to send files to
:param verbose: print out extra details as we go (default True)
:param token: a token to send back to the server to authenticate the collection
:param secret: a secret to match to the correct container
:param response_url: the build url to send the response back to. Should also come
from metadata. If not specified, no response is sent
:param branch: the branch to checkout for the build.
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
if go == None:
sys.exit(0)
# If the user wants debug, this will be set
debug = True
enable_debug = get_build_metadata(key='debug')
if enable_debug == None:
debug = False
bot.info('DEBUG %s' %debug)
# Uaw /tmp for build directory
build_dir = tempfile.mkdtemp()
# Get variables from the instance metadata API
metadata = [{'key': 'repo_url', 'value': None },
{'key': 'repo_id', 'value': None },
{'key': 'response_url', 'value': None },
{'key': 'bucket_name', 'value': None },
{'key': 'tag', 'value': None },
{'key': 'container_id', 'value': None },
{'key': 'commit', 'value': None },
{'key': 'token', 'value': None},
{'key': 'branch', 'value': None },
{'key': 'spec_file', 'value': None},
{'key': 'logging_url', 'value': None },
{'key': 'logfile', 'value': logfile }]
# Obtain values from build
bot.log("BUILD PARAMETERS:")
params = get_build_params(metadata)
params['debug'] = debug
# Default spec file is Singularity
if params['spec_file'] == None:
params['spec_file'] = "Singularity"
if params['bucket_name'] == None:
params['bucket_name'] = "singularityhub"
if params['tag'] == None:
params['tag'] = "latest"
output = run_build_main(build_dir=build_dir,
params=params)
# Output includes:
finished_image = output['image']
metadata = output['metadata']
params = output['params']
# Upload image package files to Google Storage
if os.path.exists(finished_image):
bot.info("%s successfully built" %finished_image)
dest_dir = tempfile.mkdtemp(prefix='build')
# The path to the images on google drive will be the github url/commit folder
trailing_path = "%s/%s" %(params['commit'], params['version'])
image_path = get_image_path(params['repo_url'], trailing_path)
# commits are no longer unique
# storage is by commit
build_files = [finished_image]
bot.info("Sending image to storage:")
bot.info('\n'.join(build_files))
# Start the storage service, retrieve the bucket
storage_service = get_google_service() # default is "storage" "v1"
bucket = get_bucket(storage_service,params["bucket_name"])
# For each file, upload to storage
files = []
for build_file in build_files:
bot.info("Uploading %s to storage..." %build_file)
storage_file = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=build_file)
files.append(storage_file)
# Finally, package everything to send back to shub
response = {"files": json.dumps(files),
"repo_url": params['repo_url'],
"commit": params['commit'],
"repo_id": params['repo_id'],
"branch": params['branch'],
"tag": params['tag'],
"container_id": params['container_id'],
"spec_file":params['spec_file'],
"token": params['token'],
"metadata": json.dumps(metadata)}
# Did the user specify a specific log file?
custom_logfile = get_build_metadata('logfile')
if custom_logfile is not None:
logfile = custom_logfile
response['logfile'] = logfile
# Send final build data to instance
send_build_data(build_dir=build_dir,
response_url=params['response_url'],
secret=params['token'],
data=response)
# Dump final params, for logger to retrieve
passing_params = "/tmp/params.pkl"
pickle.dump(params,open(passing_params,'wb')) | [
"def",
"run_build",
"(",
"logfile",
"=",
"'/tmp/.shub-log'",
")",
":",
"# If we are building the image, this will not be set",
"go",
"=",
"get_build_metadata",
"(",
"key",
"=",
"'dobuild'",
")",
"if",
"go",
"==",
"None",
":",
"sys",
".",
"exit",
"(",
"0",
")",
... | run_build will generate the Singularity build from a spec_file from a repo_url.
If no arguments are required, the metadata api is queried for the values.
:param build_dir: directory to do the build in. If not specified, will use temporary.
:param spec_file: the spec_file name to use, assumed to be in git repo
:param repo_url: the url to download the repo from
:param repo_id: the repo_id to uniquely identify the repo (in case name changes)
:param commit: the commit to checkout. If none provided, will use most recent.
:param bucket_name: the name of the bucket to send files to
:param verbose: print out extra details as we go (default True)
:param token: a token to send back to the server to authenticate the collection
:param secret: a secret to match to the correct container
:param response_url: the build url to send the response back to. Should also come
from metadata. If not specified, no response is sent
:param branch: the branch to checkout for the build.
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere | [
"run_build",
"will",
"generate",
"the",
"Singularity",
"build",
"from",
"a",
"spec_file",
"from",
"a",
"repo_url",
"."
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L160-L291 | train |
singularityhub/singularity-python | singularity/build/google.py | finish_build | def finish_build(verbose=True):
'''finish_build will finish the build by way of sending the log to the same bucket.
the params are loaded from the previous function that built the image, expected in
$HOME/params.pkl
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
if go == None:
sys.exit(0)
# Load metadata
passing_params = "/tmp/params.pkl"
params = pickle.load(open(passing_params,'rb'))
# Start the storage service, retrieve the bucket
storage_service = get_google_service()
bucket = get_bucket(storage_service,params['bucket_name'])
# If version isn't in params, build failed
version = 'error-%s' % str(uuid.uuid4())
if 'version' in params:
version = params['version']
trailing_path = "%s/%s" %(params['commit'], version)
image_path = get_image_path(params['repo_url'], trailing_path)
# Upload the log file
params['log_file'] = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=params['logfile'])
# Close up shop
send_build_close(params=params,
response_url=params['logging_url']) | python | def finish_build(verbose=True):
'''finish_build will finish the build by way of sending the log to the same bucket.
the params are loaded from the previous function that built the image, expected in
$HOME/params.pkl
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere
'''
# If we are building the image, this will not be set
go = get_build_metadata(key='dobuild')
if go == None:
sys.exit(0)
# Load metadata
passing_params = "/tmp/params.pkl"
params = pickle.load(open(passing_params,'rb'))
# Start the storage service, retrieve the bucket
storage_service = get_google_service()
bucket = get_bucket(storage_service,params['bucket_name'])
# If version isn't in params, build failed
version = 'error-%s' % str(uuid.uuid4())
if 'version' in params:
version = params['version']
trailing_path = "%s/%s" %(params['commit'], version)
image_path = get_image_path(params['repo_url'], trailing_path)
# Upload the log file
params['log_file'] = upload_file(storage_service,
bucket=bucket,
bucket_path=image_path,
file_name=params['logfile'])
# Close up shop
send_build_close(params=params,
response_url=params['logging_url']) | [
"def",
"finish_build",
"(",
"verbose",
"=",
"True",
")",
":",
"# If we are building the image, this will not be set",
"go",
"=",
"get_build_metadata",
"(",
"key",
"=",
"'dobuild'",
")",
"if",
"go",
"==",
"None",
":",
"sys",
".",
"exit",
"(",
"0",
")",
"# Load ... | finish_build will finish the build by way of sending the log to the same bucket.
the params are loaded from the previous function that built the image, expected in
$HOME/params.pkl
:: note: this function is currently configured to work with Google Compute
Engine metadata api, and should (will) be customized if needed to work elsewhere | [
"finish_build",
"will",
"finish",
"the",
"build",
"by",
"way",
"of",
"sending",
"the",
"log",
"to",
"the",
"same",
"bucket",
".",
"the",
"params",
"are",
"loaded",
"from",
"the",
"previous",
"function",
"that",
"built",
"the",
"image",
"expected",
"in",
"$... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L295-L330 | train |
singularityhub/singularity-python | singularity/build/google.py | get_build_metadata | def get_build_metadata(key):
'''get_build_metadata will return metadata about an instance from within it.
:param key: the key to look up
'''
headers = {"Metadata-Flavor":"Google"}
url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/%s" %(key)
response = requests.get(url=url,headers=headers)
if response.status_code == 200:
return response.text
return None | python | def get_build_metadata(key):
'''get_build_metadata will return metadata about an instance from within it.
:param key: the key to look up
'''
headers = {"Metadata-Flavor":"Google"}
url = "http://metadata.google.internal/computeMetadata/v1/instance/attributes/%s" %(key)
response = requests.get(url=url,headers=headers)
if response.status_code == 200:
return response.text
return None | [
"def",
"get_build_metadata",
"(",
"key",
")",
":",
"headers",
"=",
"{",
"\"Metadata-Flavor\"",
":",
"\"Google\"",
"}",
"url",
"=",
"\"http://metadata.google.internal/computeMetadata/v1/instance/attributes/%s\"",
"%",
"(",
"key",
")",
"response",
"=",
"requests",
".",
... | get_build_metadata will return metadata about an instance from within it.
:param key: the key to look up | [
"get_build_metadata",
"will",
"return",
"metadata",
"about",
"an",
"instance",
"from",
"within",
"it",
".",
":",
"param",
"key",
":",
"the",
"key",
"to",
"look",
"up"
] | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L338-L347 | train |
singularityhub/singularity-python | singularity/build/google.py | get_build_params | def get_build_params(metadata):
'''get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }]
'''
params = dict()
for item in metadata:
if item['value'] == None:
response = get_build_metadata(key=item['key'])
item['value'] = response
params[item['key']] = item['value']
if item['key'] not in ['token', 'secret', 'credential']:
bot.info('%s is set to %s' %(item['key'],item['value']))
return params | python | def get_build_params(metadata):
'''get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }]
'''
params = dict()
for item in metadata:
if item['value'] == None:
response = get_build_metadata(key=item['key'])
item['value'] = response
params[item['key']] = item['value']
if item['key'] not in ['token', 'secret', 'credential']:
bot.info('%s is set to %s' %(item['key'],item['value']))
return params | [
"def",
"get_build_params",
"(",
"metadata",
")",
":",
"params",
"=",
"dict",
"(",
")",
"for",
"item",
"in",
"metadata",
":",
"if",
"item",
"[",
"'value'",
"]",
"==",
"None",
":",
"response",
"=",
"get_build_metadata",
"(",
"key",
"=",
"item",
"[",
"'ke... | get_build_params uses get_build_metadata to retrieve corresponding meta data values for a build
:param metadata: a list, each item a dictionary of metadata, in format:
metadata = [{'key': 'repo_url', 'value': repo_url },
{'key': 'repo_id', 'value': repo_id },
{'key': 'credential', 'value': credential },
{'key': 'response_url', 'value': response_url },
{'key': 'token', 'value': token},
{'key': 'commit', 'value': commit }] | [
"get_build_params",
"uses",
"get_build_metadata",
"to",
"retrieve",
"corresponding",
"meta",
"data",
"values",
"for",
"a",
"build",
":",
"param",
"metadata",
":",
"a",
"list",
"each",
"item",
"a",
"dictionary",
"of",
"metadata",
"in",
"format",
":",
"metadata",
... | 498c3433724b332f7493fec632d8daf479f47b82 | https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/build/google.py#L350-L369 | train |
ployground/bsdploy | bsdploy/fabutils.py | rsync | def rsync(*args, **kwargs):
""" wrapper around the rsync command.
the ssh connection arguments are set automatically.
any args are just passed directly to rsync.
you can use {host_string} in place of the server.
the kwargs are passed on the 'local' fabric command.
if not set, 'capture' is set to False.
example usage:
rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/")
"""
kwargs.setdefault('capture', False)
replacements = dict(
host_string="{user}@{host}".format(
user=env.instance.config.get('user', 'root'),
host=env.instance.config.get(
'host', env.instance.config.get(
'ip', env.instance.uid))))
args = [x.format(**replacements) for x in args]
ssh_info = env.instance.init_ssh_key()
ssh_info.pop('host')
ssh_info.pop('user')
ssh_args = env.instance.ssh_args_from_info(ssh_info)
cmd_parts = ['rsync']
cmd_parts.extend(['-e', "ssh %s" % shjoin(ssh_args)])
cmd_parts.extend(args)
cmd = shjoin(cmd_parts)
return local(cmd, **kwargs) | python | def rsync(*args, **kwargs):
""" wrapper around the rsync command.
the ssh connection arguments are set automatically.
any args are just passed directly to rsync.
you can use {host_string} in place of the server.
the kwargs are passed on the 'local' fabric command.
if not set, 'capture' is set to False.
example usage:
rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/")
"""
kwargs.setdefault('capture', False)
replacements = dict(
host_string="{user}@{host}".format(
user=env.instance.config.get('user', 'root'),
host=env.instance.config.get(
'host', env.instance.config.get(
'ip', env.instance.uid))))
args = [x.format(**replacements) for x in args]
ssh_info = env.instance.init_ssh_key()
ssh_info.pop('host')
ssh_info.pop('user')
ssh_args = env.instance.ssh_args_from_info(ssh_info)
cmd_parts = ['rsync']
cmd_parts.extend(['-e', "ssh %s" % shjoin(ssh_args)])
cmd_parts.extend(args)
cmd = shjoin(cmd_parts)
return local(cmd, **kwargs) | [
"def",
"rsync",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'capture'",
",",
"False",
")",
"replacements",
"=",
"dict",
"(",
"host_string",
"=",
"\"{user}@{host}\"",
".",
"format",
"(",
"user",
"=",
"env",
".... | wrapper around the rsync command.
the ssh connection arguments are set automatically.
any args are just passed directly to rsync.
you can use {host_string} in place of the server.
the kwargs are passed on the 'local' fabric command.
if not set, 'capture' is set to False.
example usage:
rsync('-pthrvz', "{host_string}:/some/src/directory", "some/destination/") | [
"wrapper",
"around",
"the",
"rsync",
"command",
"."
] | 096d63b316264931627bed1f8ca8abf7eb517352 | https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/fabutils.py#L23-L53 | train |
ployground/bsdploy | bsdploy/fabfile_daemonology.py | bootstrap | def bootstrap(**kwargs):
""" Bootstrap an EC2 instance that has been booted into an AMI from http://www.daemonology.net/freebsd-on-ec2/
Note: deprecated, current AMI images are basically pre-bootstrapped, they just need to be configured.
"""
# the user for the image is `ec2-user`, there is no sudo, but we can su to root w/o password
original_host = env.host_string
env.host_string = 'ec2-user@%s' % env.instance.uid
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put('%s/authorized_keys' % bootstrap_files, '/tmp/authorized_keys')
put(join(bsdploy_path, 'enable_root_login_on_daemonology.sh'), '/tmp/', mode='0775')
run("""su root -c '/tmp/enable_root_login_on_daemonology.sh'""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
run('rm /tmp/enable_root_login_on_daemonology.sh')
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu = BootstrapUtils()
bu.ssh_keys = None
bu.upload_authorized_keys = False
bu.bootstrap_files_yaml = 'daemonology-files.yml'
bu.print_bootstrap_files()
bu.create_bootstrap_directories()
bu.upload_bootstrap_files({})
# we need to install python here, because there is no way to install it via
# ansible playbooks
bu.install_pkg('/', chroot=False, packages=['python27']) | python | def bootstrap(**kwargs):
""" Bootstrap an EC2 instance that has been booted into an AMI from http://www.daemonology.net/freebsd-on-ec2/
Note: deprecated, current AMI images are basically pre-bootstrapped, they just need to be configured.
"""
# the user for the image is `ec2-user`, there is no sudo, but we can su to root w/o password
original_host = env.host_string
env.host_string = 'ec2-user@%s' % env.instance.uid
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put('%s/authorized_keys' % bootstrap_files, '/tmp/authorized_keys')
put(join(bsdploy_path, 'enable_root_login_on_daemonology.sh'), '/tmp/', mode='0775')
run("""su root -c '/tmp/enable_root_login_on_daemonology.sh'""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
run('rm /tmp/enable_root_login_on_daemonology.sh')
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu = BootstrapUtils()
bu.ssh_keys = None
bu.upload_authorized_keys = False
bu.bootstrap_files_yaml = 'daemonology-files.yml'
bu.print_bootstrap_files()
bu.create_bootstrap_directories()
bu.upload_bootstrap_files({})
# we need to install python here, because there is no way to install it via
# ansible playbooks
bu.install_pkg('/', chroot=False, packages=['python27']) | [
"def",
"bootstrap",
"(",
"*",
"*",
"kwargs",
")",
":",
"# the user for the image is `ec2-user`, there is no sudo, but we can su to root w/o password",
"original_host",
"=",
"env",
".",
"host_string",
"env",
".",
"host_string",
"=",
"'ec2-user@%s'",
"%",
"env",
".",
"insta... | Bootstrap an EC2 instance that has been booted into an AMI from http://www.daemonology.net/freebsd-on-ec2/
Note: deprecated, current AMI images are basically pre-bootstrapped, they just need to be configured. | [
"Bootstrap",
"an",
"EC2",
"instance",
"that",
"has",
"been",
"booted",
"into",
"an",
"AMI",
"from",
"http",
":",
"//",
"www",
".",
"daemonology",
".",
"net",
"/",
"freebsd",
"-",
"on",
"-",
"ec2",
"/",
"Note",
":",
"deprecated",
"current",
"AMI",
"imag... | 096d63b316264931627bed1f8ca8abf7eb517352 | https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/fabfile_daemonology.py#L14-L44 | train |
ployground/bsdploy | bsdploy/fabfile_digitalocean.py | bootstrap | def bootstrap(**kwargs):
"""Digital Oceans FreeBSD droplets are pretty much already pre-bootstrapped,
including having python2.7 and sudo etc. pre-installed.
the only thing we need to change is to allow root to login (without a password)
enable pf and ensure it is running
"""
bu = BootstrapUtils()
# (temporarily) set the user to `freebsd`
original_host = env.host_string
env.host_string = 'freebsd@%s' % env.instance.uid
# copy DO bsdclout-init results:
if bu.os_release.startswith('10'):
sudo("""cat /etc/rc.digitalocean.d/droplet.conf > /etc/rc.conf""")
sudo("""sysrc zfs_enable=YES""")
sudo("""sysrc sshd_enable=YES""")
# enable and start pf
sudo("""sysrc pf_enable=YES""")
sudo("""sysrc -f /boot/loader.conf pfload=YES""")
sudo('kldload pf', warn_only=True)
sudo('''echo 'pass in all' > /etc/pf.conf''')
sudo('''echo 'pass out all' >> /etc/pf.conf''')
sudo('''chmod 644 /etc/pf.conf''')
sudo('service pf start')
# overwrite sshd_config, because the DO version only contains defaults
# and a line explicitly forbidding root to log in
sudo("""echo 'PermitRootLogin without-password' > /etc/ssh/sshd_config""")
# additionally, make sure the root user is unlocked!
sudo('pw unlock root')
# overwrite the authorized keys for root, because DO creates its entries to explicitly
# disallow root login
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put(path.abspath(path.join(env['config_base'], bootstrap_files, 'authorized_keys')), '/tmp/authorized_keys', use_sudo=True)
sudo('''mv /tmp/authorized_keys /root/.ssh/''')
sudo('''chown root:wheel /root/.ssh/authorized_keys''')
sudo("""service sshd fastreload""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
# clean up DO cloudinit leftovers
run("rm -f /etc/rc.d/digitalocean")
run("rm -rf /etc/rc.digitalocean.d")
run("rm -rf /usr/local/bsd-cloudinit/")
run("pkg remove -y avahi-autoipd || true")
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu.ssh_keys = None
bu.upload_authorized_keys = False | python | def bootstrap(**kwargs):
"""Digital Oceans FreeBSD droplets are pretty much already pre-bootstrapped,
including having python2.7 and sudo etc. pre-installed.
the only thing we need to change is to allow root to login (without a password)
enable pf and ensure it is running
"""
bu = BootstrapUtils()
# (temporarily) set the user to `freebsd`
original_host = env.host_string
env.host_string = 'freebsd@%s' % env.instance.uid
# copy DO bsdclout-init results:
if bu.os_release.startswith('10'):
sudo("""cat /etc/rc.digitalocean.d/droplet.conf > /etc/rc.conf""")
sudo("""sysrc zfs_enable=YES""")
sudo("""sysrc sshd_enable=YES""")
# enable and start pf
sudo("""sysrc pf_enable=YES""")
sudo("""sysrc -f /boot/loader.conf pfload=YES""")
sudo('kldload pf', warn_only=True)
sudo('''echo 'pass in all' > /etc/pf.conf''')
sudo('''echo 'pass out all' >> /etc/pf.conf''')
sudo('''chmod 644 /etc/pf.conf''')
sudo('service pf start')
# overwrite sshd_config, because the DO version only contains defaults
# and a line explicitly forbidding root to log in
sudo("""echo 'PermitRootLogin without-password' > /etc/ssh/sshd_config""")
# additionally, make sure the root user is unlocked!
sudo('pw unlock root')
# overwrite the authorized keys for root, because DO creates its entries to explicitly
# disallow root login
bootstrap_files = env.instance.config.get('bootstrap-files', 'bootstrap-files')
put(path.abspath(path.join(env['config_base'], bootstrap_files, 'authorized_keys')), '/tmp/authorized_keys', use_sudo=True)
sudo('''mv /tmp/authorized_keys /root/.ssh/''')
sudo('''chown root:wheel /root/.ssh/authorized_keys''')
sudo("""service sshd fastreload""")
# revert back to root
env.host_string = original_host
# give sshd a chance to restart
sleep(2)
# clean up DO cloudinit leftovers
run("rm -f /etc/rc.d/digitalocean")
run("rm -rf /etc/rc.digitalocean.d")
run("rm -rf /usr/local/bsd-cloudinit/")
run("pkg remove -y avahi-autoipd || true")
# allow overwrites from the commandline
env.instance.config.update(kwargs)
bu.ssh_keys = None
bu.upload_authorized_keys = False | [
"def",
"bootstrap",
"(",
"*",
"*",
"kwargs",
")",
":",
"bu",
"=",
"BootstrapUtils",
"(",
")",
"# (temporarily) set the user to `freebsd`",
"original_host",
"=",
"env",
".",
"host_string",
"env",
".",
"host_string",
"=",
"'freebsd@%s'",
"%",
"env",
".",
"instance... | Digital Oceans FreeBSD droplets are pretty much already pre-bootstrapped,
including having python2.7 and sudo etc. pre-installed.
the only thing we need to change is to allow root to login (without a password)
enable pf and ensure it is running | [
"Digital",
"Oceans",
"FreeBSD",
"droplets",
"are",
"pretty",
"much",
"already",
"pre",
"-",
"bootstrapped",
"including",
"having",
"python2",
".",
"7",
"and",
"sudo",
"etc",
".",
"pre",
"-",
"installed",
".",
"the",
"only",
"thing",
"we",
"need",
"to",
"ch... | 096d63b316264931627bed1f8ca8abf7eb517352 | https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/fabfile_digitalocean.py#L14-L65 | train |
ployground/bsdploy | bsdploy/bootstrap_utils.py | BootstrapUtils.bootstrap_files | def bootstrap_files(self):
""" we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``.
"""
bootstrap_file_yamls = [
abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
bootstrap_files = dict()
if self.upload_authorized_keys:
bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
'directory': '/mnt/root/.ssh',
'directory_mode': '0600',
'remote': '/mnt/root/.ssh/authorized_keys',
'fallback': [
'~/.ssh/identity.pub',
'~/.ssh/id_rsa.pub',
'~/.ssh/id_dsa.pub',
'~/.ssh/id_ecdsa.pub']})
for bootstrap_file_yaml in bootstrap_file_yamls:
if not exists(bootstrap_file_yaml):
continue
with open(bootstrap_file_yaml) as f:
info = yaml.load(f, Loader=SafeLoader)
if info is None:
continue
for k, v in info.items():
bootstrap_files[k] = BootstrapFile(self, k, **v)
for bf in bootstrap_files.values():
if not exists(bf.local) and bf.raw_fallback:
if not bf.existing_fallback:
print("Found no public key in %s, you have to create '%s' manually" % (expanduser('~/.ssh'), bf.local))
sys.exit(1)
print("The '%s' file is missing." % bf.local)
for path in bf.existing_fallback:
yes = env.instance.config.get('bootstrap-yes', False)
if yes or yesno("Should we generate it using the key in '%s'?" % path):
if not exists(bf.expected_path):
os.mkdir(bf.expected_path)
with open(bf.local, 'wb') as out:
with open(path, 'rb') as f:
out.write(f.read())
break
else:
# answered no to all options
sys.exit(1)
if not bf.check():
print('Cannot find %s' % bf.local)
sys.exit(1)
packages_path = join(self.download_path, 'packages')
if exists(packages_path):
for dirpath, dirnames, filenames in os.walk(packages_path):
path = dirpath.split(packages_path)[1][1:]
for filename in filenames:
if not filename.endswith('.txz'):
continue
bootstrap_files[join(path, filename)] = BootstrapFile(
self, join(path, filename), **dict(
local=join(packages_path, join(path, filename)),
remote=join('/mnt/var/cache/pkg/All', filename),
encrypted=False))
if self.ssh_keys is not None:
for ssh_key_name, ssh_key_options in list(self.ssh_keys):
ssh_key = join(self.custom_template_path, ssh_key_name)
if exists(ssh_key):
pub_key_name = '%s.pub' % ssh_key_name
pub_key = '%s.pub' % ssh_key
if not exists(pub_key):
print("Public key '%s' for '%s' missing." % (pub_key, ssh_key))
sys.exit(1)
bootstrap_files[ssh_key_name] = BootstrapFile(
self, ssh_key_name, **dict(
local=ssh_key,
remote='/mnt/etc/ssh/%s' % ssh_key_name,
mode=0600))
bootstrap_files[pub_key_name] = BootstrapFile(
self, pub_key_name, **dict(
local=pub_key,
remote='/mnt/etc/ssh/%s' % pub_key_name,
mode=0644))
if hasattr(env.instance, 'get_vault_lib'):
vaultlib = env.instance.get_vault_lib()
for bf in bootstrap_files.values():
if bf.encrypted is None and exists(bf.local):
with open(bf.local) as f:
data = f.read()
bf.info['encrypted'] = vaultlib.is_encrypted(data)
return bootstrap_files | python | def bootstrap_files(self):
""" we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``.
"""
bootstrap_file_yamls = [
abspath(join(self.default_template_path, self.bootstrap_files_yaml)),
abspath(join(self.custom_template_path, self.bootstrap_files_yaml))]
bootstrap_files = dict()
if self.upload_authorized_keys:
bootstrap_files['authorized_keys'] = BootstrapFile(self, 'authorized_keys', **{
'directory': '/mnt/root/.ssh',
'directory_mode': '0600',
'remote': '/mnt/root/.ssh/authorized_keys',
'fallback': [
'~/.ssh/identity.pub',
'~/.ssh/id_rsa.pub',
'~/.ssh/id_dsa.pub',
'~/.ssh/id_ecdsa.pub']})
for bootstrap_file_yaml in bootstrap_file_yamls:
if not exists(bootstrap_file_yaml):
continue
with open(bootstrap_file_yaml) as f:
info = yaml.load(f, Loader=SafeLoader)
if info is None:
continue
for k, v in info.items():
bootstrap_files[k] = BootstrapFile(self, k, **v)
for bf in bootstrap_files.values():
if not exists(bf.local) and bf.raw_fallback:
if not bf.existing_fallback:
print("Found no public key in %s, you have to create '%s' manually" % (expanduser('~/.ssh'), bf.local))
sys.exit(1)
print("The '%s' file is missing." % bf.local)
for path in bf.existing_fallback:
yes = env.instance.config.get('bootstrap-yes', False)
if yes or yesno("Should we generate it using the key in '%s'?" % path):
if not exists(bf.expected_path):
os.mkdir(bf.expected_path)
with open(bf.local, 'wb') as out:
with open(path, 'rb') as f:
out.write(f.read())
break
else:
# answered no to all options
sys.exit(1)
if not bf.check():
print('Cannot find %s' % bf.local)
sys.exit(1)
packages_path = join(self.download_path, 'packages')
if exists(packages_path):
for dirpath, dirnames, filenames in os.walk(packages_path):
path = dirpath.split(packages_path)[1][1:]
for filename in filenames:
if not filename.endswith('.txz'):
continue
bootstrap_files[join(path, filename)] = BootstrapFile(
self, join(path, filename), **dict(
local=join(packages_path, join(path, filename)),
remote=join('/mnt/var/cache/pkg/All', filename),
encrypted=False))
if self.ssh_keys is not None:
for ssh_key_name, ssh_key_options in list(self.ssh_keys):
ssh_key = join(self.custom_template_path, ssh_key_name)
if exists(ssh_key):
pub_key_name = '%s.pub' % ssh_key_name
pub_key = '%s.pub' % ssh_key
if not exists(pub_key):
print("Public key '%s' for '%s' missing." % (pub_key, ssh_key))
sys.exit(1)
bootstrap_files[ssh_key_name] = BootstrapFile(
self, ssh_key_name, **dict(
local=ssh_key,
remote='/mnt/etc/ssh/%s' % ssh_key_name,
mode=0600))
bootstrap_files[pub_key_name] = BootstrapFile(
self, pub_key_name, **dict(
local=pub_key,
remote='/mnt/etc/ssh/%s' % pub_key_name,
mode=0644))
if hasattr(env.instance, 'get_vault_lib'):
vaultlib = env.instance.get_vault_lib()
for bf in bootstrap_files.values():
if bf.encrypted is None and exists(bf.local):
with open(bf.local) as f:
data = f.read()
bf.info['encrypted'] = vaultlib.is_encrypted(data)
return bootstrap_files | [
"def",
"bootstrap_files",
"(",
"self",
")",
":",
"bootstrap_file_yamls",
"=",
"[",
"abspath",
"(",
"join",
"(",
"self",
".",
"default_template_path",
",",
"self",
".",
"bootstrap_files_yaml",
")",
")",
",",
"abspath",
"(",
"join",
"(",
"self",
".",
"custom_t... | we need some files to bootstrap the FreeBSD installation.
Some...
- need to be provided by the user (i.e. authorized_keys)
- others have some (sensible) defaults (i.e. rc.conf)
- some can be downloaded via URL (i.e.) http://pkg.freebsd.org/freebsd:10:x86:64/latest/Latest/pkg.txz
For those which can be downloaded we check the downloads directory. if the file exists there
(and if the checksum matches TODO!) we will upload it to the host. If not, we will fetch the file
from the given URL from the host.
For files that cannot be downloaded (authorized_keys, rc.conf etc.) we allow the user to provide their
own version in a ``bootstrap-files`` folder. The location of this folder can either be explicitly provided
via the ``bootstrap-files`` key in the host definition of the config file or it defaults to ``deployment/bootstrap-files``.
User provided files can be rendered as Jinja2 templates, by providing ``use_jinja: True`` in the YAML file.
They will be rendered with the instance configuration dictionary as context.
If the file is not found there, we revert to the default
files that are part of bsdploy. If the file cannot be found there either we either error out or for authorized_keys
we look in ``~/.ssh/identity.pub``. | [
"we",
"need",
"some",
"files",
"to",
"bootstrap",
"the",
"FreeBSD",
"installation",
".",
"Some",
"...",
"-",
"need",
"to",
"be",
"provided",
"by",
"the",
"user",
"(",
"i",
".",
"e",
".",
"authorized_keys",
")",
"-",
"others",
"have",
"some",
"(",
"sens... | 096d63b316264931627bed1f8ca8abf7eb517352 | https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/bootstrap_utils.py#L158-L266 | train |
ployground/bsdploy | bsdploy/bootstrap_utils.py | BootstrapUtils.devices | def devices(self):
""" computes the name of the disk devices that are suitable
installation targets by subtracting CDROM- and USB devices
from the list of total mounts.
"""
install_devices = self.install_devices
if 'bootstrap-system-devices' in env.instance.config:
devices = set(env.instance.config['bootstrap-system-devices'].split())
else:
devices = set(self.sysctl_devices)
for sysctl_device in self.sysctl_devices:
for install_device in install_devices:
if install_device.startswith(sysctl_device):
devices.remove(sysctl_device)
return devices | python | def devices(self):
""" computes the name of the disk devices that are suitable
installation targets by subtracting CDROM- and USB devices
from the list of total mounts.
"""
install_devices = self.install_devices
if 'bootstrap-system-devices' in env.instance.config:
devices = set(env.instance.config['bootstrap-system-devices'].split())
else:
devices = set(self.sysctl_devices)
for sysctl_device in self.sysctl_devices:
for install_device in install_devices:
if install_device.startswith(sysctl_device):
devices.remove(sysctl_device)
return devices | [
"def",
"devices",
"(",
"self",
")",
":",
"install_devices",
"=",
"self",
".",
"install_devices",
"if",
"'bootstrap-system-devices'",
"in",
"env",
".",
"instance",
".",
"config",
":",
"devices",
"=",
"set",
"(",
"env",
".",
"instance",
".",
"config",
"[",
"... | computes the name of the disk devices that are suitable
installation targets by subtracting CDROM- and USB devices
from the list of total mounts. | [
"computes",
"the",
"name",
"of",
"the",
"disk",
"devices",
"that",
"are",
"suitable",
"installation",
"targets",
"by",
"subtracting",
"CDROM",
"-",
"and",
"USB",
"devices",
"from",
"the",
"list",
"of",
"total",
"mounts",
"."
] | 096d63b316264931627bed1f8ca8abf7eb517352 | https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/bootstrap_utils.py#L372-L386 | train |
ployground/bsdploy | bsdploy/bootstrap_utils.py | BootstrapUtils.fetch_assets | def fetch_assets(self):
""" download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
"""
# allow overwrites from the commandline
packages = set(
env.instance.config.get('bootstrap-packages', '').split())
packages.update(['python27'])
cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
items = sorted(self.bootstrap_files.items())
for filename, asset in items:
if asset.url:
if not exists(dirname(asset.local)):
os.makedirs(dirname(asset.local))
local(cmd.format(asset))
if filename == 'packagesite.txz':
# add packages to download
items.extend(self._fetch_packages(asset.local, packages)) | python | def fetch_assets(self):
""" download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
"""
# allow overwrites from the commandline
packages = set(
env.instance.config.get('bootstrap-packages', '').split())
packages.update(['python27'])
cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
items = sorted(self.bootstrap_files.items())
for filename, asset in items:
if asset.url:
if not exists(dirname(asset.local)):
os.makedirs(dirname(asset.local))
local(cmd.format(asset))
if filename == 'packagesite.txz':
# add packages to download
items.extend(self._fetch_packages(asset.local, packages)) | [
"def",
"fetch_assets",
"(",
"self",
")",
":",
"# allow overwrites from the commandline",
"packages",
"=",
"set",
"(",
"env",
".",
"instance",
".",
"config",
".",
"get",
"(",
"'bootstrap-packages'",
",",
"''",
")",
".",
"split",
"(",
")",
")",
"packages",
"."... | download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping. | [
"download",
"bootstrap",
"assets",
"to",
"control",
"host",
".",
"If",
"present",
"on",
"the",
"control",
"host",
"they",
"will",
"be",
"uploaded",
"to",
"the",
"target",
"host",
"during",
"bootstrapping",
"."
] | 096d63b316264931627bed1f8ca8abf7eb517352 | https://github.com/ployground/bsdploy/blob/096d63b316264931627bed1f8ca8abf7eb517352/bsdploy/bootstrap_utils.py#L457-L474 | train |
david-caro/python-foreman | foreman/client.py | res_to_str | def res_to_str(res):
"""
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
"""
if 'Authorization' in res.request.headers:
res.request.headers['Authorization'] = "*****"
return """
####################################
url = %s
headers = %s
-------- data sent -----------------
%s
------------------------------------
@@@@@ response @@@@@@@@@@@@@@@@
headers = %s
code = %d
reason = %s
--------- data received ------------
%s
------------------------------------
####################################
""" % (res.url,
str(res.request.headers),
OLD_REQ and res.request.data or res.request.body,
res.headers,
res.status_code,
res.reason,
res.text) | python | def res_to_str(res):
"""
:param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it
"""
if 'Authorization' in res.request.headers:
res.request.headers['Authorization'] = "*****"
return """
####################################
url = %s
headers = %s
-------- data sent -----------------
%s
------------------------------------
@@@@@ response @@@@@@@@@@@@@@@@
headers = %s
code = %d
reason = %s
--------- data received ------------
%s
------------------------------------
####################################
""" % (res.url,
str(res.request.headers),
OLD_REQ and res.request.data or res.request.body,
res.headers,
res.status_code,
res.reason,
res.text) | [
"def",
"res_to_str",
"(",
"res",
")",
":",
"if",
"'Authorization'",
"in",
"res",
".",
"request",
".",
"headers",
":",
"res",
".",
"request",
".",
"headers",
"[",
"'Authorization'",
"]",
"=",
"\"*****\"",
"return",
"\"\"\"\n####################################\nurl... | :param res: :class:`requests.Response` object
Parse the given request and generate an informative string from it | [
":",
"param",
"res",
":",
":",
"class",
":",
"requests",
".",
"Response",
"object"
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L63-L92 | train |
david-caro/python-foreman | foreman/client.py | parse_resource_definition | def parse_resource_definition(resource_name, resource_dct):
"""
Returns all the info extracted from a resource section of the apipie json
:param resource_name: Name of the resource that is defined by the section
:param resrouce_dict: Dictionary as generated by apipie of the resource
definition
"""
new_dict = {
'__module__': resource_dct.get('__module__', __name__),
'__doc__': resource_dct['full_description'],
'_resource_name': resource_name,
'_own_methods': set(),
'_conflicting_methods': [],
}
# methods in foreign_methods are meant for other resources,
# that is, the url and the resource field do not match /api/{resource}
foreign_methods = {}
# as defined per apipie gem, each method can have more than one api,
# for example, /api/hosts can have the GET /api/hosts api and the GET
# /api/hosts/:id api or DELETE /api/hosts
for method in resource_dct['methods']:
# set the docstring if it only has one api
if not new_dict['__doc__'] and len(method['apis']) == 1:
new_dict['__doc__'] = \
method['apis'][0]['short_description']
for api in method['apis']:
api = MethodAPIDescription(resource_name, method, api)
if api.resource != resource_name:
# this means that the json apipie passed says that an
# endpoint in the form: /api/{resource}/* belongs to
# {different_resource}, we just put it under {resource}
# later, storing it under _foreign_methods for now as we
# might not have parsed {resource} yet
functions = foreign_methods.setdefault(api.resource, {})
if api.name in functions:
old_api = functions.get(api.name).defs
# show only in debug the repeated but identical definitions
log_method = logger.warning
if api.url == old_api.url:
log_method = logger.debug
log_method(
"There is a conflict trying to redefine a method "
"for a foreign resource (%s): \n"
"\tresource:\n"
"\tapipie_resource: %s\n"
"\tnew_api: %s\n"
"\tnew_url: %s\n"
"\told_api: %s\n"
"\told_url: %s",
api.name,
resource_name,
pprint.pformat(api),
api.url,
pprint.pformat(old_api),
old_api.url,
)
new_dict['_conflicting_methods'].append(api)
continue
functions[api.name] = api.generate_func()
else:
# it's an own method, resource and url match
if api.name in new_dict['_own_methods']:
old_api = new_dict.get(api.name).defs
log_method = logger.warning
# show only in debug the repeated but identical definitions
if api.url == old_api.url:
log_method = logger.debug
log_method(
"There is a conflict trying to redefine method "
"(%s): \n"
"\tapipie_resource: %s\n"
"\tnew_api: %s\n"
"\tnew_url: %s\n"
"\told_api: %s\n"
"\told_url: %s",
api.name,
resource_name,
pprint.pformat(api),
api.url,
pprint.pformat(old_api),
old_api.url,
)
new_dict['_conflicting_methods'].append(api)
continue
new_dict['_own_methods'].add(api.name)
new_dict[api.name] = api.generate_func()
return new_dict, foreign_methods | python | def parse_resource_definition(resource_name, resource_dct):
"""
Returns all the info extracted from a resource section of the apipie json
:param resource_name: Name of the resource that is defined by the section
:param resrouce_dict: Dictionary as generated by apipie of the resource
definition
"""
new_dict = {
'__module__': resource_dct.get('__module__', __name__),
'__doc__': resource_dct['full_description'],
'_resource_name': resource_name,
'_own_methods': set(),
'_conflicting_methods': [],
}
# methods in foreign_methods are meant for other resources,
# that is, the url and the resource field do not match /api/{resource}
foreign_methods = {}
# as defined per apipie gem, each method can have more than one api,
# for example, /api/hosts can have the GET /api/hosts api and the GET
# /api/hosts/:id api or DELETE /api/hosts
for method in resource_dct['methods']:
# set the docstring if it only has one api
if not new_dict['__doc__'] and len(method['apis']) == 1:
new_dict['__doc__'] = \
method['apis'][0]['short_description']
for api in method['apis']:
api = MethodAPIDescription(resource_name, method, api)
if api.resource != resource_name:
# this means that the json apipie passed says that an
# endpoint in the form: /api/{resource}/* belongs to
# {different_resource}, we just put it under {resource}
# later, storing it under _foreign_methods for now as we
# might not have parsed {resource} yet
functions = foreign_methods.setdefault(api.resource, {})
if api.name in functions:
old_api = functions.get(api.name).defs
# show only in debug the repeated but identical definitions
log_method = logger.warning
if api.url == old_api.url:
log_method = logger.debug
log_method(
"There is a conflict trying to redefine a method "
"for a foreign resource (%s): \n"
"\tresource:\n"
"\tapipie_resource: %s\n"
"\tnew_api: %s\n"
"\tnew_url: %s\n"
"\told_api: %s\n"
"\told_url: %s",
api.name,
resource_name,
pprint.pformat(api),
api.url,
pprint.pformat(old_api),
old_api.url,
)
new_dict['_conflicting_methods'].append(api)
continue
functions[api.name] = api.generate_func()
else:
# it's an own method, resource and url match
if api.name in new_dict['_own_methods']:
old_api = new_dict.get(api.name).defs
log_method = logger.warning
# show only in debug the repeated but identical definitions
if api.url == old_api.url:
log_method = logger.debug
log_method(
"There is a conflict trying to redefine method "
"(%s): \n"
"\tapipie_resource: %s\n"
"\tnew_api: %s\n"
"\tnew_url: %s\n"
"\told_api: %s\n"
"\told_url: %s",
api.name,
resource_name,
pprint.pformat(api),
api.url,
pprint.pformat(old_api),
old_api.url,
)
new_dict['_conflicting_methods'].append(api)
continue
new_dict['_own_methods'].add(api.name)
new_dict[api.name] = api.generate_func()
return new_dict, foreign_methods | [
"def",
"parse_resource_definition",
"(",
"resource_name",
",",
"resource_dct",
")",
":",
"new_dict",
"=",
"{",
"'__module__'",
":",
"resource_dct",
".",
"get",
"(",
"'__module__'",
",",
"__name__",
")",
",",
"'__doc__'",
":",
"resource_dct",
"[",
"'full_descriptio... | Returns all the info extracted from a resource section of the apipie json
:param resource_name: Name of the resource that is defined by the section
:param resrouce_dict: Dictionary as generated by apipie of the resource
definition | [
"Returns",
"all",
"the",
"info",
"extracted",
"from",
"a",
"resource",
"section",
"of",
"the",
"apipie",
"json"
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L312-L406 | train |
david-caro/python-foreman | foreman/client.py | MethodAPIDescription.parse_resource_from_url | def parse_resource_from_url(self, url):
"""
Returns the appropriate resource name for the given URL.
:param url: API URL stub, like: '/api/hosts'
:return: Resource name, like 'hosts', or None if not found
"""
# special case for the api root
if url == '/api':
return 'api'
elif url == '/katello':
return 'katello'
match = self.resource_pattern.match(url)
if match:
return match.groupdict().get('resource', None) | python | def parse_resource_from_url(self, url):
"""
Returns the appropriate resource name for the given URL.
:param url: API URL stub, like: '/api/hosts'
:return: Resource name, like 'hosts', or None if not found
"""
# special case for the api root
if url == '/api':
return 'api'
elif url == '/katello':
return 'katello'
match = self.resource_pattern.match(url)
if match:
return match.groupdict().get('resource', None) | [
"def",
"parse_resource_from_url",
"(",
"self",
",",
"url",
")",
":",
"# special case for the api root",
"if",
"url",
"==",
"'/api'",
":",
"return",
"'api'",
"elif",
"url",
"==",
"'/katello'",
":",
"return",
"'katello'",
"match",
"=",
"self",
".",
"resource_patte... | Returns the appropriate resource name for the given URL.
:param url: API URL stub, like: '/api/hosts'
:return: Resource name, like 'hosts', or None if not found | [
"Returns",
"the",
"appropriate",
"resource",
"name",
"for",
"the",
"given",
"URL",
"."
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L137-L152 | train |
david-caro/python-foreman | foreman/client.py | MethodAPIDescription._get_name | def _get_name(self):
"""
There are three cases, because apipie definitions can have multiple
signatures but python does not
For example, the api endpoint:
/api/myres/:myres_id/subres/:subres_id/subres2
for method *index* will be translated to the api method name:
subres_index_subres2
So when you want to call it from v2 object, you'll have:
myres.subres_index_subres2
"""
if self.url.count(':') > 1:
# /api/one/two/:three/four -> two_:three_four
base_name = self.url.split('/', 3)[-1].replace('/', '_')[1:]
# :one_two_three -> two_three
if base_name.startswith(':'):
base_name = base_name.split('_')[-1]
# one_:two_three_:four_five -> one_three_five
base_name = re.sub('_:[^/]+', '', base_name)
# in case that the last term was a parameter
if base_name.endswith('_'):
base_name = base_name[:-1]
# one_two_three -> one_two_method_three
base_name = (
'_' + self._method['name']
).join(base_name.rsplit('_', 1))
else:
base_name = self._method['name']
if base_name == 'import':
base_name = 'import_'
if self._apipie_resource != self.resource:
return '%s_%s' % (self._apipie_resource, base_name)
else:
return base_name | python | def _get_name(self):
"""
There are three cases, because apipie definitions can have multiple
signatures but python does not
For example, the api endpoint:
/api/myres/:myres_id/subres/:subres_id/subres2
for method *index* will be translated to the api method name:
subres_index_subres2
So when you want to call it from v2 object, you'll have:
myres.subres_index_subres2
"""
if self.url.count(':') > 1:
# /api/one/two/:three/four -> two_:three_four
base_name = self.url.split('/', 3)[-1].replace('/', '_')[1:]
# :one_two_three -> two_three
if base_name.startswith(':'):
base_name = base_name.split('_')[-1]
# one_:two_three_:four_five -> one_three_five
base_name = re.sub('_:[^/]+', '', base_name)
# in case that the last term was a parameter
if base_name.endswith('_'):
base_name = base_name[:-1]
# one_two_three -> one_two_method_three
base_name = (
'_' + self._method['name']
).join(base_name.rsplit('_', 1))
else:
base_name = self._method['name']
if base_name == 'import':
base_name = 'import_'
if self._apipie_resource != self.resource:
return '%s_%s' % (self._apipie_resource, base_name)
else:
return base_name | [
"def",
"_get_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"url",
".",
"count",
"(",
"':'",
")",
">",
"1",
":",
"# /api/one/two/:three/four -> two_:three_four",
"base_name",
"=",
"self",
".",
"url",
".",
"split",
"(",
"'/'",
",",
"3",
")",
"[",
"-",... | There are three cases, because apipie definitions can have multiple
signatures but python does not
For example, the api endpoint:
/api/myres/:myres_id/subres/:subres_id/subres2
for method *index* will be translated to the api method name:
subres_index_subres2
So when you want to call it from v2 object, you'll have:
myres.subres_index_subres2 | [
"There",
"are",
"three",
"cases",
"because",
"apipie",
"definitions",
"can",
"have",
"multiple",
"signatures",
"but",
"python",
"does",
"not",
"For",
"example",
"the",
"api",
"endpoint",
":",
"/",
"api",
"/",
"myres",
"/",
":",
"myres_id",
"/",
"subres",
"... | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L154-L191 | train |
def generate_func(self, as_global=False):
    """Generate a python function for this API method definition.

    :param as_global: if set, will use the global function name, instead of
        the class method (usually {resource}_{class_method}) when defining
        the function
    :return: the generated function; this description object is attached to
        it as the ``defs`` attribute to ease debugging
    """
    keywords = []
    params_def = []
    params_doc = ""
    original_names = {}
    # index the declared params by name
    params = dict(
        (param['name'], param)
        for param in self.params
    )
    # parse the url required params, as sometimes they are skipped in the
    # parameters list of the definition
    for param in self.url_params:
        if param not in params:
            param = {
                'name': param,
                'required': True,
                'description': '',
                'validator': '',
            }
            params[param['name']] = param
        else:
            # anything embedded in the url is mandatory
            params[param]['required'] = True
    # split required and non-required params so required ones come first
    # in the generated signature
    req_params = []
    nonreq_params = []
    for param in six.itervalues(params):
        if param['required']:
            req_params.append(param)
        else:
            nonreq_params.append(param)
    for param in req_params + nonreq_params:
        params_doc += self.create_param_doc(param) + "\n"
        local_name = param['name']
        # some params collide with python keywords, that's why we do
        # this switch (and undo it inside the function we generate)
        if param['name'] == 'except':
            local_name = 'except_'
        original_names[local_name] = param['name']
        keywords.append(local_name)
        if param['required']:
            params_def.append("%s" % local_name)
        else:
            params_def.append("%s=None" % local_name)
    # BUG FIX: compute the definition name once and reuse it below; the old
    # code always looked the generated function up as ``locals()[self.name]``
    # which raised KeyError when as_global was set (the function was defined
    # under the global method name, not self.name).
    func_name = as_global and self.get_global_method_name() or self.name
    func_head = 'def {0}(self, {1}):'.format(
        func_name,
        ', '.join(params_def)
    )
    code_body = (
        '    _vars_ = locals()\n'
        '    _url = self._fill_url("{url}", _vars_, {url_params})\n'
        '    _original_names = {original_names}\n'
        '    _kwargs = dict((_original_names[k], _vars_[k])\n'
        '                   for k in {keywords} if _vars_[k])\n'
        '    return self._foreman.do_{http_method}(_url, _kwargs)')
    code_body = code_body.format(
        http_method=self.http_method.lower(),
        url=self.url,
        url_params=self.url_params,
        keywords=keywords,
        original_names=original_names,
    )
    code = [
        func_head,
        '    """',
        self.short_desc,
        '',
        params_doc,
        '    """',
        code_body,
    ]
    code = '\n'.join(code)
    six.exec_(code)
    function = locals()[func_name]
    # to ease debugging, all the funcs have the definitions attached
    setattr(function, 'defs', self)
    return function
"""
Generate function for specific method and using specific api
:param as_global: if set, will use the global function name, instead of
the class method (usually {resource}_{class_method}) when defining
the function
"""
keywords = []
params_def = []
params_doc = ""
original_names = {}
params = dict(
(param['name'], param)
for param in self.params
)
# parse the url required params, as sometimes they are skipped in the
# parameters list of the definition
for param in self.url_params:
if param not in params:
param = {
'name': param,
'required': True,
'description': '',
'validator': '',
}
params[param['name']] = param
else:
params[param]['required'] = True
# split required and non-required params for the definition
req_params = []
nonreq_params = []
for param in six.itervalues(params):
if param['required']:
req_params.append(param)
else:
nonreq_params.append(param)
for param in req_params + nonreq_params:
params_doc += self.create_param_doc(param) + "\n"
local_name = param['name']
# some params collide with python keywords, that's why we do
# this switch (and undo it inside the function we generate)
if param['name'] == 'except':
local_name = 'except_'
original_names[local_name] = param['name']
keywords.append(local_name)
if param['required']:
params_def.append("%s" % local_name)
else:
params_def.append("%s=None" % local_name)
func_head = 'def {0}(self, {1}):'.format(
as_global and self.get_global_method_name() or self.name,
', '.join(params_def)
)
code_body = (
' _vars_ = locals()\n'
' _url = self._fill_url("{url}", _vars_, {url_params})\n'
' _original_names = {original_names}\n'
' _kwargs = dict((_original_names[k], _vars_[k])\n'
' for k in {keywords} if _vars_[k])\n'
' return self._foreman.do_{http_method}(_url, _kwargs)')
code_body = code_body.format(
http_method=self.http_method.lower(),
url=self.url,
url_params=self.url_params,
keywords=keywords,
original_names=original_names,
)
code = [
func_head,
' """',
self.short_desc,
'',
params_doc,
' """',
code_body,
]
code = '\n'.join(code)
six.exec_(code)
function = locals()[self.name]
# to ease debugging, all the funcs have the definitions attached
setattr(function, 'defs', self)
return function | [
"def",
"generate_func",
"(",
"self",
",",
"as_global",
"=",
"False",
")",
":",
"keywords",
"=",
"[",
"]",
"params_def",
"=",
"[",
"]",
"params_doc",
"=",
"\"\"",
"original_names",
"=",
"{",
"}",
"params",
"=",
"dict",
"(",
"(",
"param",
"[",
"'name'",
... | Generate function for specific method and using specific api
:param as_global: if set, will use the global function name, instead of
the class method (usually {resource}_{class_method}) when defining
the function | [
"Generate",
"function",
"for",
"specific",
"method",
"and",
"using",
"specific",
"api"
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L196-L287 | train |
def create_param_doc(cls, param, prefix=None):
    """Build the docstring line(s) describing a single API parameter.

    :param param: dict with info about the parameter
    :param prefix: prefix string used when recursing into nested params
    """
    description = cls.exclude_html_reg.sub('', param['description']).strip()
    if not description:
        description = "<no description>"
    if prefix:
        full_name = "%s[%s]" % (prefix, param['name'])
    else:
        full_name = param['name']
    requirement = " (REQUIRED)" if param['required'] else " (OPTIONAL)"
    lines = [
        ":param %s: %s; %s%s" % (
            full_name, description, param['validator'], requirement
        )
    ]
    # nested parameters are documented recursively, indexed by this name
    for child in param.get('params', []):
        lines.append(cls.create_param_doc(child, full_name))
    return "\n".join(lines)
"""
Generate documentation for single parameter of function
:param param: dict contains info about parameter
:param sub: prefix string for recursive purposes
"""
desc = cls.exclude_html_reg.sub('', param['description']).strip()
if not desc:
desc = "<no description>"
name = param['name']
if prefix:
name = "%s[%s]" % (prefix, name)
doc_ = ":param %s: %s; %s" % (name, desc, param['validator'])
if param['required']:
doc_ += " (REQUIRED)"
else:
doc_ += " (OPTIONAL)"
for param in param.get('params', []):
doc_ += "\n" + cls.create_param_doc(param, name)
return doc_ | [
"def",
"create_param_doc",
"(",
"cls",
",",
"param",
",",
"prefix",
"=",
"None",
")",
":",
"desc",
"=",
"cls",
".",
"exclude_html_reg",
".",
"sub",
"(",
"''",
",",
"param",
"[",
"'description'",
"]",
")",
".",
"strip",
"(",
")",
"if",
"not",
"desc",
... | Generate documentation for single parameter of function
:param param: dict contains info about parameter
:param sub: prefix string for recursive purposes | [
"Generate",
"documentation",
"for",
"single",
"parameter",
"of",
"function",
":",
"param",
"param",
":",
"dict",
"contains",
"info",
"about",
"parameter",
":",
"param",
"sub",
":",
"prefix",
"string",
"for",
"recursive",
"purposes"
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L290-L309 | train |
def convert_plugin_def(http_method, funcs):
    """Parse one element of a plugin's definitions dict into apipie-style
    method descriptions.

    :param http_method: HTTP method that uses (GET, POST, DELETE, ...)
    :param funcs: functions related to that HTTP method
    """
    if http_method not in ('GET', 'PUT', 'POST', 'DELETE'):
        logger.error(
            'Plugin load failure, HTTP method %s unsupported.',
            http_method,
        )
        return []
    methods = []
    for fname, params in six.iteritems(funcs):
        # synthesize an apipie-like method description for this function
        method = {
            'name': fname,
            'apis': [{
                'short_description': 'no-doc',
                'http_method': http_method,
                'api_url': '/api/' + fname,
            }],
            'params': [
                {
                    'name': pname,
                    'validator': "Must be %s" % pdef['ptype'],
                    'description': '',
                    'required': pdef['required'],
                }
                for pname, pdef in six.iteritems(params)
            ],
        }
        methods.append(method)
    return methods
"""
This function parses one of the elements of the definitions dict for a
plugin and extracts the relevant information
:param http_method: HTTP method that uses (GET, POST, DELETE, ...)
:param funcs: functions related to that HTTP method
"""
methods = []
if http_method not in ('GET', 'PUT', 'POST', 'DELETE'):
logger.error(
'Plugin load failure, HTTP method %s unsupported.',
http_method,
)
return methods
for fname, params in six.iteritems(funcs):
method = {
'apis': [{'short_description': 'no-doc'}],
'params': [],
}
method['apis'][0]['http_method'] = http_method
method['apis'][0]['api_url'] = '/api/' + fname
method['name'] = fname
for pname, pdef in six.iteritems(params):
param = {
'name': pname,
'validator': "Must be %s" % pdef['ptype'],
'description': '',
'required': pdef['required'],
}
method['params'].append(param)
methods.append(method)
return methods | [
"def",
"convert_plugin_def",
"(",
"http_method",
",",
"funcs",
")",
":",
"methods",
"=",
"[",
"]",
"if",
"http_method",
"not",
"in",
"(",
"'GET'",
",",
"'PUT'",
",",
"'POST'",
",",
"'DELETE'",
")",
":",
"logger",
".",
"error",
"(",
"'Plugin load failure, H... | This function parses one of the elements of the definitions dict for a
plugin and extracts the relevant information
:param http_method: HTTP method that uses (GET, POST, DELETE, ...)
:param funcs: functions related to that HTTP method | [
"This",
"function",
"parses",
"one",
"of",
"the",
"elements",
"of",
"the",
"definitions",
"dict",
"for",
"a",
"plugin",
"and",
"extracts",
"the",
"relevant",
"information"
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/foreman/client.py#L510-L542 | train |
def get_current_version(repo_path):
    """Return the semantic version string for the given repo.

    Counts as a non-backwards compatible change any commit whose message
    header matches (case insensitive)::

        sem-ver: .*break.*

    as a feature any commit whose header matches::

        sem-ver: feature

    and any other commit as a bugfix.
    """
    repo = dulwich.repo.Repo(repo_path)
    tags = get_tags(repo)
    # (major, feature, fix), threaded through every first-parent commit
    version = (0, 0, 0)
    for commit_sha, children in reversed(
        get_children_per_first_parent(repo_path).items()
    ):
        commit = get_repo_object(repo, commit_sha)
        version = get_version(
            commit=commit,
            tags=tags,
            maj_version=version[0],
            feat_version=version[1],
            fix_version=version[2],
            children=children,
        )
    return '%s.%s.%s' % version
"""
Given a repo will return the version string, according to semantic
versioning, counting as non-backwards compatible commit any one with a
message header that matches (case insensitive)::
sem-ver: .*break.*
And as features any commit with a header matching::
sem-ver: feature
And counting any other as a bugfix
"""
repo = dulwich.repo.Repo(repo_path)
tags = get_tags(repo)
maj_version = 0
feat_version = 0
fix_version = 0
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
maj_version, feat_version, fix_version = get_version(
commit=commit,
tags=tags,
maj_version=maj_version,
feat_version=feat_version,
fix_version=fix_version,
children=children,
)
return '%s.%s.%s' % (maj_version, feat_version, fix_version) | [
"def",
"get_current_version",
"(",
"repo_path",
")",
":",
"repo",
"=",
"dulwich",
".",
"repo",
".",
"Repo",
"(",
"repo_path",
")",
"tags",
"=",
"get_tags",
"(",
"repo",
")",
"maj_version",
"=",
"0",
"feat_version",
"=",
"0",
"fix_version",
"=",
"0",
"for... | Given a repo will return the version string, according to semantic
versioning, counting as non-backwards compatible commit any one with a
message header that matches (case insensitive)::
sem-ver: .*break.*
And as features any commit with a header matching::
sem-ver: feature
And counting any other as a bugfix | [
"Given",
"a",
"repo",
"will",
"return",
"the",
"version",
"string",
"according",
"to",
"semantic",
"versioning",
"counting",
"as",
"non",
"-",
"backwards",
"compatible",
"commit",
"any",
"one",
"with",
"a",
"message",
"header",
"that",
"matches",
"(",
"case",
... | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/scripts/version_manager.py#L402-L435 | train |
def get_authors(repo_path, from_commit):
    """Return the sorted, newline-joined list of authors for the repo.

    :param repo_path: path to the git repository
    :param from_commit: base revision to start from; when ``None`` every
        commit is included
    """
    repo = dulwich.repo.Repo(repo_path)
    refs = get_refs(repo)
    authors = set()
    # start collecting right away when no base revision was given
    include = from_commit is None
    for commit_sha, children in reversed(
        get_children_per_first_parent(repo_path).items()
    ):
        commit = get_repo_object(repo, commit_sha)
        if not include:
            # start once we reach the base commit (by sha prefix or ref)
            include = (
                commit_sha.startswith(from_commit) or
                fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
            )
        if include:
            authors.add(commit.author.decode())
            for child in children:
                authors.add(child.author.decode())
    return '\n'.join(sorted(authors))
"""
Given a repo and optionally a base revision to start from, will return
the list of authors.
"""
repo = dulwich.repo.Repo(repo_path)
refs = get_refs(repo)
start_including = False
authors = set()
if from_commit is None:
start_including = True
for commit_sha, children in reversed(
get_children_per_first_parent(repo_path).items()
):
commit = get_repo_object(repo, commit_sha)
if (
start_including or commit_sha.startswith(from_commit) or
fuzzy_matches_refs(from_commit, refs.get(commit_sha, []))
):
authors.add(commit.author.decode())
for child in children:
authors.add(child.author.decode())
start_including = True
return '\n'.join(sorted(authors)) | [
"def",
"get_authors",
"(",
"repo_path",
",",
"from_commit",
")",
":",
"repo",
"=",
"dulwich",
".",
"repo",
".",
"Repo",
"(",
"repo_path",
")",
"refs",
"=",
"get_refs",
"(",
"repo",
")",
"start_including",
"=",
"False",
"authors",
"=",
"set",
"(",
")",
... | Given a repo and optionally a base revision to start from, will return
the list of authors. | [
"Given",
"a",
"repo",
"and",
"optionally",
"a",
"base",
"revision",
"to",
"start",
"from",
"will",
"return",
"the",
"list",
"of",
"authors",
"."
] | a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8 | https://github.com/david-caro/python-foreman/blob/a81e4c99fe6a9ad56e9758dbe96dd55bca67d3b8/scripts/version_manager.py#L438-L465 | train |
def emit(
    self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False
):
    """Emit a spout Tuple message.

    :param tup: the Tuple to send to Storm, should contain only
                JSON-serializable data.
    :type tup: list or tuple
    :param tup_id: the ID for the Tuple. Leave this blank for an
                   unreliable emit.
    :type tup_id: str
    :param stream: ID of the stream this Tuple should be emitted to.
                   Leave empty to emit to the default stream.
    :type stream: str
    :param direct_task: the task to send the Tuple to if performing a
                        direct emit.
    :type direct_task: int
    :param need_task_ids: indicate whether or not you'd like the task IDs
                          the Tuple was emitted (default: ``False``).
    :type need_task_ids: bool
    :returns: ``None``, unless ``need_task_ids=True``, in which case it
              will be a ``list`` of task IDs that the Tuple was sent to.
              Note that when specifying direct_task, this will be equal to
              ``[direct_task]``.
    """
    # The spout adds nothing of its own here; it simply forwards every
    # argument unchanged to the shared Component implementation.
    parent = super(Spout, self)
    return parent.emit(
        tup,
        tup_id=tup_id,
        stream=stream,
        direct_task=direct_task,
        need_task_ids=need_task_ids,
    )
self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False
):
"""Emit a spout Tuple message.
:param tup: the Tuple to send to Storm, should contain only
JSON-serializable data.
:type tup: list or tuple
:param tup_id: the ID for the Tuple. Leave this blank for an
unreliable emit.
:type tup_id: str
:param stream: ID of the stream this Tuple should be emitted to.
Leave empty to emit to the default stream.
:type stream: str
:param direct_task: the task to send the Tuple to if performing a
direct emit.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
return super(Spout, self).emit(
tup,
tup_id=tup_id,
stream=stream,
direct_task=direct_task,
need_task_ids=need_task_ids,
) | [
"def",
"emit",
"(",
"self",
",",
"tup",
",",
"tup_id",
"=",
"None",
",",
"stream",
"=",
"None",
",",
"direct_task",
"=",
"None",
",",
"need_task_ids",
"=",
"False",
")",
":",
"return",
"super",
"(",
"Spout",
",",
"self",
")",
".",
"emit",
"(",
"tup... | Emit a spout Tuple message.
:param tup: the Tuple to send to Storm, should contain only
JSON-serializable data.
:type tup: list or tuple
:param tup_id: the ID for the Tuple. Leave this blank for an
unreliable emit.
:type tup_id: str
:param stream: ID of the stream this Tuple should be emitted to.
Leave empty to emit to the default stream.
:type stream: str
:param direct_task: the task to send the Tuple to if performing a
direct emit.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``. | [
"Emit",
"a",
"spout",
"Tuple",
"message",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/spout.py#L50-L82 | train |
pystorm/pystorm | pystorm/spout.py | Spout._run | def _run(self):
"""The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested.
"""
cmd = self.read_command()
if cmd["command"] == "next":
self.next_tuple()
elif cmd["command"] == "ack":
self.ack(cmd["id"])
elif cmd["command"] == "fail":
self.fail(cmd["id"])
elif cmd["command"] == "activate":
self.activate()
elif cmd["command"] == "deactivate":
self.deactivate()
else:
self.logger.error("Received invalid command from Storm: %r", cmd)
self.send_message({"command": "sync"}) | python | def _run(self):
"""The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested.
"""
cmd = self.read_command()
if cmd["command"] == "next":
self.next_tuple()
elif cmd["command"] == "ack":
self.ack(cmd["id"])
elif cmd["command"] == "fail":
self.fail(cmd["id"])
elif cmd["command"] == "activate":
self.activate()
elif cmd["command"] == "deactivate":
self.deactivate()
else:
self.logger.error("Received invalid command from Storm: %r", cmd)
self.send_message({"command": "sync"}) | [
"def",
"_run",
"(",
"self",
")",
":",
"cmd",
"=",
"self",
".",
"read_command",
"(",
")",
"if",
"cmd",
"[",
"\"command\"",
"]",
"==",
"\"next\"",
":",
"self",
".",
"next_tuple",
"(",
")",
"elif",
"cmd",
"[",
"\"command\"",
"]",
"==",
"\"ack\"",
":",
... | The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested. | [
"The",
"inside",
"of",
"run",
"s",
"infinite",
"loop",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/spout.py#L100-L118 | train |
def ack(self, tup_id):
    """Forget a Tuple that a bolt fully acknowledged in the topology.

    :param tup_id: the ID of the Tuple that has been fully acknowledged in
                   the topology.
    :type tup_id: str
    """
    # A previously failed tuple may still be acked once it finally succeeds.
    self.failed_tuples.pop(tup_id, None)
    if tup_id in self.unacked_tuples:
        del self.unacked_tuples[tup_id]
    else:
        self.logger.error("Received ack for unknown tuple ID: %r", tup_id)
"""Called when a bolt acknowledges a Tuple in the topology.
:param tup_id: the ID of the Tuple that has been fully acknowledged in
the topology.
:type tup_id: str
"""
self.failed_tuples.pop(tup_id, None)
try:
del self.unacked_tuples[tup_id]
except KeyError:
self.logger.error("Received ack for unknown tuple ID: %r", tup_id) | [
"def",
"ack",
"(",
"self",
",",
"tup_id",
")",
":",
"self",
".",
"failed_tuples",
".",
"pop",
"(",
"tup_id",
",",
"None",
")",
"try",
":",
"del",
"self",
".",
"unacked_tuples",
"[",
"tup_id",
"]",
"except",
"KeyError",
":",
"self",
".",
"logger",
"."... | Called when a bolt acknowledges a Tuple in the topology.
:param tup_id: the ID of the Tuple that has been fully acknowledged in
the topology.
:type tup_id: str | [
"Called",
"when",
"a",
"bolt",
"acknowledges",
"a",
"Tuple",
"in",
"the",
"topology",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/spout.py#L137-L148 | train |
def fail(self, tup_id):
    """Replay, or finally give up on, a Tuple that failed in the topology.

    A reliable spout will replay a failed tuple up to ``max_fails`` times.

    :param tup_id: the ID of the Tuple that has failed in the topology
                   either due to a bolt calling ``fail()`` or a Tuple
                   timing out.
    :type tup_id: str
    """
    saved_args = self.unacked_tuples.get(tup_id)
    if saved_args is None:
        self.logger.error("Received fail for unknown tuple ID: %r", tup_id)
        return
    tup, stream, direct_task, need_task_ids = saved_args
    if self.failed_tuples[tup_id] >= self.max_fails:
        # Retry budget exhausted; just pretend we got an ack.
        self.logger.info(
            "Acking tuple ID %r after it exceeded retry limit " "(%r)",
            tup_id,
            self.max_fails,
        )
        self.ack(tup_id)
    else:
        self.emit(
            tup,
            tup_id=tup_id,
            stream=stream,
            direct_task=direct_task,
            need_task_ids=need_task_ids,
        )
        self.failed_tuples[tup_id] += 1
"""Called when a Tuple fails in the topology
A reliable spout will replay a failed tuple up to ``max_fails`` times.
:param tup_id: the ID of the Tuple that has failed in the topology
either due to a bolt calling ``fail()`` or a Tuple
timing out.
:type tup_id: str
"""
saved_args = self.unacked_tuples.get(tup_id)
if saved_args is None:
self.logger.error("Received fail for unknown tuple ID: %r", tup_id)
return
tup, stream, direct_task, need_task_ids = saved_args
if self.failed_tuples[tup_id] < self.max_fails:
self.emit(
tup,
tup_id=tup_id,
stream=stream,
direct_task=direct_task,
need_task_ids=need_task_ids,
)
self.failed_tuples[tup_id] += 1
else:
# Just pretend we got an ack when we exceed retry limit
self.logger.info(
"Acking tuple ID %r after it exceeded retry limit " "(%r)",
tup_id,
self.max_fails,
)
self.ack(tup_id) | [
"def",
"fail",
"(",
"self",
",",
"tup_id",
")",
":",
"saved_args",
"=",
"self",
".",
"unacked_tuples",
".",
"get",
"(",
"tup_id",
")",
"if",
"saved_args",
"is",
"None",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"Received fail for unknown tuple ID: %r\... | Called when a Tuple fails in the topology
A reliable spout will replay a failed tuple up to ``max_fails`` times.
:param tup_id: the ID of the Tuple that has failed in the topology
either due to a bolt calling ``fail()`` or a Tuple
timing out.
:type tup_id: str | [
"Called",
"when",
"a",
"Tuple",
"fails",
"in",
"the",
"topology"
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/spout.py#L150-L181 | train |
def emit(
    self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False
):
    """Emit a spout Tuple and remember it in `unacked_tuples`.

    In order for this to work, `tup_id` is a required parameter.

    See :meth:`Bolt.emit`.
    """
    if tup_id is None:
        raise ValueError(
            "You must provide a tuple ID when emitting with a "
            "ReliableSpout in order for the tuple to be "
            "tracked."
        )
    # Remember everything needed to re-emit this tuple should it fail.
    self.unacked_tuples[tup_id] = (tup, stream, direct_task, need_task_ids)
    return super(ReliableSpout, self).emit(
        tup,
        tup_id=tup_id,
        stream=stream,
        direct_task=direct_task,
        need_task_ids=need_task_ids,
    )
self, tup, tup_id=None, stream=None, direct_task=None, need_task_ids=False
):
"""Emit a spout Tuple & add metadata about it to `unacked_tuples`.
In order for this to work, `tup_id` is a required parameter.
See :meth:`Bolt.emit`.
"""
if tup_id is None:
raise ValueError(
"You must provide a tuple ID when emitting with a "
"ReliableSpout in order for the tuple to be "
"tracked."
)
args = (tup, stream, direct_task, need_task_ids)
self.unacked_tuples[tup_id] = args
return super(ReliableSpout, self).emit(
tup,
tup_id=tup_id,
stream=stream,
direct_task=direct_task,
need_task_ids=need_task_ids,
) | [
"def",
"emit",
"(",
"self",
",",
"tup",
",",
"tup_id",
"=",
"None",
",",
"stream",
"=",
"None",
",",
"direct_task",
"=",
"None",
",",
"need_task_ids",
"=",
"False",
")",
":",
"if",
"tup_id",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"You must pr... | Emit a spout Tuple & add metadata about it to `unacked_tuples`.
In order for this to work, `tup_id` is a required parameter.
See :meth:`Bolt.emit`. | [
"Emit",
"a",
"spout",
"Tuple",
"&",
"add",
"metadata",
"about",
"it",
"to",
"unacked_tuples",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/spout.py#L183-L206 | train |
def remote_pdb_handler(signum, frame):
    """ Handler to drop us into a remote debugger upon receiving SIGUSR1 """
    try:
        from remote_pdb import RemotePdb

        # Port 0 lets the OS pick a free port for the debugger to listen on.
        debugger = RemotePdb(host="127.0.0.1", port=0)
        debugger.set_trace(frame=frame)
    except ImportError:
        log.warning(
            "remote_pdb unavailable. Please install remote_pdb to "
            "allow remote debugging."
        )
    # Restore signal handler for later
    signal.signal(signum, remote_pdb_handler)
""" Handler to drop us into a remote debugger upon receiving SIGUSR1 """
try:
from remote_pdb import RemotePdb
rdb = RemotePdb(host="127.0.0.1", port=0)
rdb.set_trace(frame=frame)
except ImportError:
log.warning(
"remote_pdb unavailable. Please install remote_pdb to "
"allow remote debugging."
)
# Restore signal handler for later
signal.signal(signum, remote_pdb_handler) | [
"def",
"remote_pdb_handler",
"(",
"signum",
",",
"frame",
")",
":",
"try",
":",
"from",
"remote_pdb",
"import",
"RemotePdb",
"rdb",
"=",
"RemotePdb",
"(",
"host",
"=",
"\"127.0.0.1\"",
",",
"port",
"=",
"0",
")",
"rdb",
".",
"set_trace",
"(",
"frame",
"=... | Handler to drop us into a remote debugger upon receiving SIGUSR1 | [
"Handler",
"to",
"drop",
"us",
"into",
"a",
"remote",
"debugger",
"upon",
"receiving",
"SIGUSR1"
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L50-L63 | train |
pystorm/pystorm | pystorm/component.py | StormHandler.emit | def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
If exception information is present, it is formatted using
traceback.print_exception and sent to Storm.
"""
try:
msg = self.format(record)
level = _STORM_LOG_LEVELS.get(record.levelname.lower(), _STORM_LOG_INFO)
self.serializer.send_message(
{"command": "log", "msg": str(msg), "level": level}
)
except Exception:
self.handleError(record) | python | def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
If exception information is present, it is formatted using
traceback.print_exception and sent to Storm.
"""
try:
msg = self.format(record)
level = _STORM_LOG_LEVELS.get(record.levelname.lower(), _STORM_LOG_INFO)
self.serializer.send_message(
{"command": "log", "msg": str(msg), "level": level}
)
except Exception:
self.handleError(record) | [
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"try",
":",
"msg",
"=",
"self",
".",
"format",
"(",
"record",
")",
"level",
"=",
"_STORM_LOG_LEVELS",
".",
"get",
"(",
"record",
".",
"levelname",
".",
"lower",
"(",
")",
",",
"_STORM_LOG_INFO",
")... | Emit a record.
If a formatter is specified, it is used to format the record.
If exception information is present, it is formatted using
traceback.print_exception and sent to Storm. | [
"Emit",
"a",
"record",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L78-L93 | train |
pystorm/pystorm | pystorm/component.py | Component._setup_component | def _setup_component(self, storm_conf, context):
"""Add helpful instance variables to component after initial handshake
with Storm. Also configure logging.
"""
self.topology_name = storm_conf.get("topology.name", "")
self.task_id = context.get("taskid", "")
self.component_name = context.get("componentid")
# If using Storm before 0.10.0 componentid is not available
if self.component_name is None:
self.component_name = context.get("task->component", {}).get(
str(self.task_id), ""
)
self.debug = storm_conf.get("topology.debug", False)
self.storm_conf = storm_conf
self.context = context
# Set up logging
self.logger = logging.getLogger(".".join((__name__, self.component_name)))
log_path = self.storm_conf.get("pystorm.log.path")
log_file_name = self.storm_conf.get(
"pystorm.log.file",
"pystorm_{topology_name}" "_{component_name}" "_{task_id}" "_{pid}.log",
)
root_log = logging.getLogger()
log_level = self.storm_conf.get("pystorm.log.level", "info")
if log_path:
max_bytes = self.storm_conf.get("pystorm.log.max_bytes", 1000000) # 1 MB
backup_count = self.storm_conf.get("pystorm.log.backup_count", 10)
log_file = join(
log_path,
(
log_file_name.format(
topology_name=self.topology_name,
component_name=self.component_name,
task_id=self.task_id,
pid=self.pid,
)
),
)
handler = RotatingFileHandler(
log_file, maxBytes=max_bytes, backupCount=backup_count
)
log_format = self.storm_conf.get(
"pystorm.log.format",
"%(asctime)s - %(name)s - " "%(levelname)s - %(message)s",
)
else:
self.log(
"pystorm StormHandler logging enabled, so all messages at "
'levels greater than "pystorm.log.level" ({}) will be sent'
" to Storm.".format(log_level)
)
handler = StormHandler(self.serializer)
log_format = self.storm_conf.get(
"pystorm.log.format", "%(asctime)s - %(name)s - " "%(message)s"
)
formatter = logging.Formatter(log_format)
log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
if self.debug:
# potentially override logging that was provided if
# topology.debug was set to true
log_level = logging.DEBUG
handler.setLevel(log_level)
handler.setFormatter(formatter)
root_log.addHandler(handler)
self.logger.setLevel(log_level)
logging.getLogger("pystorm").setLevel(log_level)
# Redirect stdout to ensure that print statements/functions
# won't disrupt the multilang protocol
if self.serializer.output_stream == sys.stdout:
sys.stdout = LogStream(logging.getLogger("pystorm.stdout")) | python | def _setup_component(self, storm_conf, context):
"""Add helpful instance variables to component after initial handshake
with Storm. Also configure logging.
"""
self.topology_name = storm_conf.get("topology.name", "")
self.task_id = context.get("taskid", "")
self.component_name = context.get("componentid")
# If using Storm before 0.10.0 componentid is not available
if self.component_name is None:
self.component_name = context.get("task->component", {}).get(
str(self.task_id), ""
)
self.debug = storm_conf.get("topology.debug", False)
self.storm_conf = storm_conf
self.context = context
# Set up logging
self.logger = logging.getLogger(".".join((__name__, self.component_name)))
log_path = self.storm_conf.get("pystorm.log.path")
log_file_name = self.storm_conf.get(
"pystorm.log.file",
"pystorm_{topology_name}" "_{component_name}" "_{task_id}" "_{pid}.log",
)
root_log = logging.getLogger()
log_level = self.storm_conf.get("pystorm.log.level", "info")
if log_path:
max_bytes = self.storm_conf.get("pystorm.log.max_bytes", 1000000) # 1 MB
backup_count = self.storm_conf.get("pystorm.log.backup_count", 10)
log_file = join(
log_path,
(
log_file_name.format(
topology_name=self.topology_name,
component_name=self.component_name,
task_id=self.task_id,
pid=self.pid,
)
),
)
handler = RotatingFileHandler(
log_file, maxBytes=max_bytes, backupCount=backup_count
)
log_format = self.storm_conf.get(
"pystorm.log.format",
"%(asctime)s - %(name)s - " "%(levelname)s - %(message)s",
)
else:
self.log(
"pystorm StormHandler logging enabled, so all messages at "
'levels greater than "pystorm.log.level" ({}) will be sent'
" to Storm.".format(log_level)
)
handler = StormHandler(self.serializer)
log_format = self.storm_conf.get(
"pystorm.log.format", "%(asctime)s - %(name)s - " "%(message)s"
)
formatter = logging.Formatter(log_format)
log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
if self.debug:
# potentially override logging that was provided if
# topology.debug was set to true
log_level = logging.DEBUG
handler.setLevel(log_level)
handler.setFormatter(formatter)
root_log.addHandler(handler)
self.logger.setLevel(log_level)
logging.getLogger("pystorm").setLevel(log_level)
# Redirect stdout to ensure that print statements/functions
# won't disrupt the multilang protocol
if self.serializer.output_stream == sys.stdout:
sys.stdout = LogStream(logging.getLogger("pystorm.stdout")) | [
"def",
"_setup_component",
"(",
"self",
",",
"storm_conf",
",",
"context",
")",
":",
"self",
".",
"topology_name",
"=",
"storm_conf",
".",
"get",
"(",
"\"topology.name\"",
",",
"\"\"",
")",
"self",
".",
"task_id",
"=",
"context",
".",
"get",
"(",
"\"taskid... | Add helpful instance variables to component after initial handshake
with Storm. Also configure logging. | [
"Add",
"helpful",
"instance",
"variables",
"to",
"component",
"after",
"initial",
"handshake",
"with",
"Storm",
".",
"Also",
"configure",
"logging",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L230-L300 | train |
pystorm/pystorm | pystorm/component.py | Component.read_handshake | def read_handshake(self):
"""Read and process an initial handshake message from Storm."""
msg = self.read_message()
pid_dir, _conf, _context = msg["pidDir"], msg["conf"], msg["context"]
# Write a blank PID file out to the pidDir
open(join(pid_dir, str(self.pid)), "w").close()
self.send_message({"pid": self.pid})
return _conf, _context | python | def read_handshake(self):
"""Read and process an initial handshake message from Storm."""
msg = self.read_message()
pid_dir, _conf, _context = msg["pidDir"], msg["conf"], msg["context"]
# Write a blank PID file out to the pidDir
open(join(pid_dir, str(self.pid)), "w").close()
self.send_message({"pid": self.pid})
return _conf, _context | [
"def",
"read_handshake",
"(",
"self",
")",
":",
"msg",
"=",
"self",
".",
"read_message",
"(",
")",
"pid_dir",
",",
"_conf",
",",
"_context",
"=",
"msg",
"[",
"\"pidDir\"",
"]",
",",
"msg",
"[",
"\"conf\"",
"]",
",",
"msg",
"[",
"\"context\"",
"]",
"#... | Read and process an initial handshake message from Storm. | [
"Read",
"and",
"process",
"an",
"initial",
"handshake",
"message",
"from",
"Storm",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L326-L335 | train |
pystorm/pystorm | pystorm/component.py | Component.send_message | def send_message(self, message):
"""Send a message to Storm via stdout."""
if not isinstance(message, dict):
logger = self.logger if self.logger else log
logger.error(
"%s.%d attempted to send a non dict message to Storm: " "%r",
self.component_name,
self.pid,
message,
)
return
self.serializer.send_message(message) | python | def send_message(self, message):
"""Send a message to Storm via stdout."""
if not isinstance(message, dict):
logger = self.logger if self.logger else log
logger.error(
"%s.%d attempted to send a non dict message to Storm: " "%r",
self.component_name,
self.pid,
message,
)
return
self.serializer.send_message(message) | [
"def",
"send_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"isinstance",
"(",
"message",
",",
"dict",
")",
":",
"logger",
"=",
"self",
".",
"logger",
"if",
"self",
".",
"logger",
"else",
"log",
"logger",
".",
"error",
"(",
"\"%s.%d atte... | Send a message to Storm via stdout. | [
"Send",
"a",
"message",
"to",
"Storm",
"via",
"stdout",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L337-L348 | train |
pystorm/pystorm | pystorm/component.py | Component.raise_exception | def raise_exception(self, exception, tup=None):
"""Report an exception back to Storm via logging.
:param exception: a Python exception.
:param tup: a :class:`Tuple` object.
"""
if tup:
message = (
"Python {exception_name} raised while processing Tuple "
"{tup!r}\n{traceback}"
)
else:
message = "Python {exception_name} raised\n{traceback}"
message = message.format(
exception_name=exception.__class__.__name__, tup=tup, traceback=format_exc()
)
self.send_message({"command": "error", "msg": str(message)})
self.send_message({"command": "sync"}) | python | def raise_exception(self, exception, tup=None):
"""Report an exception back to Storm via logging.
:param exception: a Python exception.
:param tup: a :class:`Tuple` object.
"""
if tup:
message = (
"Python {exception_name} raised while processing Tuple "
"{tup!r}\n{traceback}"
)
else:
message = "Python {exception_name} raised\n{traceback}"
message = message.format(
exception_name=exception.__class__.__name__, tup=tup, traceback=format_exc()
)
self.send_message({"command": "error", "msg": str(message)})
self.send_message({"command": "sync"}) | [
"def",
"raise_exception",
"(",
"self",
",",
"exception",
",",
"tup",
"=",
"None",
")",
":",
"if",
"tup",
":",
"message",
"=",
"(",
"\"Python {exception_name} raised while processing Tuple \"",
"\"{tup!r}\\n{traceback}\"",
")",
"else",
":",
"message",
"=",
"\"Python ... | Report an exception back to Storm via logging.
:param exception: a Python exception.
:param tup: a :class:`Tuple` object. | [
"Report",
"an",
"exception",
"back",
"to",
"Storm",
"via",
"logging",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L350-L367 | train |
pystorm/pystorm | pystorm/component.py | Component.log | def log(self, message, level=None):
"""Log a message to Storm optionally providing a logging level.
:param message: the log message to send to Storm.
:type message: str
:param level: the logging level that Storm should use when writing the
``message``. Can be one of: trace, debug, info, warn, or
error (default: ``info``).
:type level: str
.. warning::
This will send your message to Storm regardless of what level you
specify. In almost all cases, you are better of using
``Component.logger`` and not setting ``pystorm.log.path``, because
that will use a :class:`pystorm.component.StormHandler` to do the
filtering on the Python side (instead of on the Java side after taking
the time to serialize your message and send it to Storm).
"""
level = _STORM_LOG_LEVELS.get(level, _STORM_LOG_INFO)
self.send_message({"command": "log", "msg": str(message), "level": level}) | python | def log(self, message, level=None):
"""Log a message to Storm optionally providing a logging level.
:param message: the log message to send to Storm.
:type message: str
:param level: the logging level that Storm should use when writing the
``message``. Can be one of: trace, debug, info, warn, or
error (default: ``info``).
:type level: str
.. warning::
This will send your message to Storm regardless of what level you
specify. In almost all cases, you are better of using
``Component.logger`` and not setting ``pystorm.log.path``, because
that will use a :class:`pystorm.component.StormHandler` to do the
filtering on the Python side (instead of on the Java side after taking
the time to serialize your message and send it to Storm).
"""
level = _STORM_LOG_LEVELS.get(level, _STORM_LOG_INFO)
self.send_message({"command": "log", "msg": str(message), "level": level}) | [
"def",
"log",
"(",
"self",
",",
"message",
",",
"level",
"=",
"None",
")",
":",
"level",
"=",
"_STORM_LOG_LEVELS",
".",
"get",
"(",
"level",
",",
"_STORM_LOG_INFO",
")",
"self",
".",
"send_message",
"(",
"{",
"\"command\"",
":",
"\"log\"",
",",
"\"msg\""... | Log a message to Storm optionally providing a logging level.
:param message: the log message to send to Storm.
:type message: str
:param level: the logging level that Storm should use when writing the
``message``. Can be one of: trace, debug, info, warn, or
error (default: ``info``).
:type level: str
.. warning::
This will send your message to Storm regardless of what level you
specify. In almost all cases, you are better of using
``Component.logger`` and not setting ``pystorm.log.path``, because
that will use a :class:`pystorm.component.StormHandler` to do the
filtering on the Python side (instead of on the Java side after taking
the time to serialize your message and send it to Storm). | [
"Log",
"a",
"message",
"to",
"Storm",
"optionally",
"providing",
"a",
"logging",
"level",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L384-L404 | train |
pystorm/pystorm | pystorm/component.py | Component.emit | def emit(
self,
tup,
tup_id=None,
stream=None,
anchors=None,
direct_task=None,
need_task_ids=False,
):
"""Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param tup_id: the ID for the Tuple. If omitted by a
:class:`pystorm.spout.Spout`, this emit will be
unreliable.
:type tup_id: str
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or
:class:`pystorm.component.Tuple` instances)
which the emitted Tuples should be anchored to. This is
only passed by :class:`pystorm.bolt.Bolt`.
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
if not isinstance(tup, (list, tuple)):
raise TypeError(
"All Tuples must be either lists or tuples, "
"received {!r} instead.".format(type(tup))
)
msg = {"command": "emit", "tuple": tup}
downstream_task_ids = None
if anchors is not None:
msg["anchors"] = anchors
if tup_id is not None:
msg["id"] = tup_id
if stream is not None:
msg["stream"] = stream
if direct_task is not None:
msg["task"] = direct_task
if need_task_ids:
downstream_task_ids = [direct_task]
if not need_task_ids:
# only need to send on False, Storm's default is True
msg["need_task_ids"] = need_task_ids
if need_task_ids and direct_task is None:
# Use both locks so we ensure send_message and read_task_ids are for
# same emit
with self._reader_lock, self._writer_lock:
self.send_message(msg)
downstream_task_ids = self.read_task_ids()
# No locks necessary in simple case because serializer will acquire
# write lock itself
else:
self.send_message(msg)
return downstream_task_ids | python | def emit(
self,
tup,
tup_id=None,
stream=None,
anchors=None,
direct_task=None,
need_task_ids=False,
):
"""Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param tup_id: the ID for the Tuple. If omitted by a
:class:`pystorm.spout.Spout`, this emit will be
unreliable.
:type tup_id: str
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or
:class:`pystorm.component.Tuple` instances)
which the emitted Tuples should be anchored to. This is
only passed by :class:`pystorm.bolt.Bolt`.
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
if not isinstance(tup, (list, tuple)):
raise TypeError(
"All Tuples must be either lists or tuples, "
"received {!r} instead.".format(type(tup))
)
msg = {"command": "emit", "tuple": tup}
downstream_task_ids = None
if anchors is not None:
msg["anchors"] = anchors
if tup_id is not None:
msg["id"] = tup_id
if stream is not None:
msg["stream"] = stream
if direct_task is not None:
msg["task"] = direct_task
if need_task_ids:
downstream_task_ids = [direct_task]
if not need_task_ids:
# only need to send on False, Storm's default is True
msg["need_task_ids"] = need_task_ids
if need_task_ids and direct_task is None:
# Use both locks so we ensure send_message and read_task_ids are for
# same emit
with self._reader_lock, self._writer_lock:
self.send_message(msg)
downstream_task_ids = self.read_task_ids()
# No locks necessary in simple case because serializer will acquire
# write lock itself
else:
self.send_message(msg)
return downstream_task_ids | [
"def",
"emit",
"(",
"self",
",",
"tup",
",",
"tup_id",
"=",
"None",
",",
"stream",
"=",
"None",
",",
"anchors",
"=",
"None",
",",
"direct_task",
"=",
"None",
",",
"need_task_ids",
"=",
"False",
",",
")",
":",
"if",
"not",
"isinstance",
"(",
"tup",
... | Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param tup_id: the ID for the Tuple. If omitted by a
:class:`pystorm.spout.Spout`, this emit will be
unreliable.
:type tup_id: str
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or
:class:`pystorm.component.Tuple` instances)
which the emitted Tuples should be anchored to. This is
only passed by :class:`pystorm.bolt.Bolt`.
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``. | [
"Emit",
"a",
"new",
"Tuple",
"to",
"a",
"stream",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L406-L478 | train |
pystorm/pystorm | pystorm/component.py | Component.run | def run(self):
"""Main run loop for all components.
Performs initial handshake with Storm and reads Tuples handing them off
to subclasses. Any exceptions are caught and logged back to Storm
prior to the Python process exiting.
.. warning::
Subclasses should **not** override this method.
"""
storm_conf, context = self.read_handshake()
self._setup_component(storm_conf, context)
self.initialize(storm_conf, context)
while True:
try:
self._run()
except StormWentAwayError:
log.info("Exiting because parent Storm process went away.")
self._exit(2)
except Exception as e:
log_msg = "Exception in {}.run()".format(self.__class__.__name__)
exc_info = sys.exc_info()
try:
self.logger.error(log_msg, exc_info=True)
self._handle_run_exception(e)
except StormWentAwayError:
log.error(log_msg, exc_info=exc_info)
log.info("Exiting because parent Storm process went away.")
self._exit(2)
except:
log.error(log_msg, exc_info=exc_info)
log.error(
"While trying to handle previous exception...",
exc_info=sys.exc_info(),
)
if self.exit_on_exception:
self._exit(1) | python | def run(self):
"""Main run loop for all components.
Performs initial handshake with Storm and reads Tuples handing them off
to subclasses. Any exceptions are caught and logged back to Storm
prior to the Python process exiting.
.. warning::
Subclasses should **not** override this method.
"""
storm_conf, context = self.read_handshake()
self._setup_component(storm_conf, context)
self.initialize(storm_conf, context)
while True:
try:
self._run()
except StormWentAwayError:
log.info("Exiting because parent Storm process went away.")
self._exit(2)
except Exception as e:
log_msg = "Exception in {}.run()".format(self.__class__.__name__)
exc_info = sys.exc_info()
try:
self.logger.error(log_msg, exc_info=True)
self._handle_run_exception(e)
except StormWentAwayError:
log.error(log_msg, exc_info=exc_info)
log.info("Exiting because parent Storm process went away.")
self._exit(2)
except:
log.error(log_msg, exc_info=exc_info)
log.error(
"While trying to handle previous exception...",
exc_info=sys.exc_info(),
)
if self.exit_on_exception:
self._exit(1) | [
"def",
"run",
"(",
"self",
")",
":",
"storm_conf",
",",
"context",
"=",
"self",
".",
"read_handshake",
"(",
")",
"self",
".",
"_setup_component",
"(",
"storm_conf",
",",
"context",
")",
"self",
".",
"initialize",
"(",
"storm_conf",
",",
"context",
")",
"... | Main run loop for all components.
Performs initial handshake with Storm and reads Tuples handing them off
to subclasses. Any exceptions are caught and logged back to Storm
prior to the Python process exiting.
.. warning::
Subclasses should **not** override this method. | [
"Main",
"run",
"loop",
"for",
"all",
"components",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L504-L542 | train |
pystorm/pystorm | pystorm/component.py | Component._exit | def _exit(self, status_code):
"""Properly kill Python process including zombie threads."""
# If there are active threads still running infinite loops, sys.exit
# won't kill them but os._exit will. os._exit skips calling cleanup
# handlers, flushing stdio buffers, etc.
exit_func = os._exit if threading.active_count() > 1 else sys.exit
exit_func(status_code) | python | def _exit(self, status_code):
"""Properly kill Python process including zombie threads."""
# If there are active threads still running infinite loops, sys.exit
# won't kill them but os._exit will. os._exit skips calling cleanup
# handlers, flushing stdio buffers, etc.
exit_func = os._exit if threading.active_count() > 1 else sys.exit
exit_func(status_code) | [
"def",
"_exit",
"(",
"self",
",",
"status_code",
")",
":",
"# If there are active threads still running infinite loops, sys.exit",
"# won't kill them but os._exit will. os._exit skips calling cleanup",
"# handlers, flushing stdio buffers, etc.",
"exit_func",
"=",
"os",
".",
"_exit",
... | Properly kill Python process including zombie threads. | [
"Properly",
"kill",
"Python",
"process",
"including",
"zombie",
"threads",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/component.py#L544-L550 | train |
pystorm/pystorm | pystorm/serializers/json_serializer.py | JSONSerializer._wrap_stream | def _wrap_stream(stream):
"""Returns a TextIOWrapper around the given stream that handles UTF-8
encoding/decoding.
"""
if hasattr(stream, "buffer"):
return io.TextIOWrapper(stream.buffer, encoding="utf-8")
elif hasattr(stream, "readable"):
return io.TextIOWrapper(stream, encoding="utf-8")
# Python 2.x stdin and stdout are just files
else:
return io.open(stream.fileno(), mode=stream.mode, encoding="utf-8") | python | def _wrap_stream(stream):
"""Returns a TextIOWrapper around the given stream that handles UTF-8
encoding/decoding.
"""
if hasattr(stream, "buffer"):
return io.TextIOWrapper(stream.buffer, encoding="utf-8")
elif hasattr(stream, "readable"):
return io.TextIOWrapper(stream, encoding="utf-8")
# Python 2.x stdin and stdout are just files
else:
return io.open(stream.fileno(), mode=stream.mode, encoding="utf-8") | [
"def",
"_wrap_stream",
"(",
"stream",
")",
":",
"if",
"hasattr",
"(",
"stream",
",",
"\"buffer\"",
")",
":",
"return",
"io",
".",
"TextIOWrapper",
"(",
"stream",
".",
"buffer",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"elif",
"hasattr",
"(",
"stream",
","... | Returns a TextIOWrapper around the given stream that handles UTF-8
encoding/decoding. | [
"Returns",
"a",
"TextIOWrapper",
"around",
"the",
"given",
"stream",
"that",
"handles",
"UTF",
"-",
"8",
"encoding",
"/",
"decoding",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/serializers/json_serializer.py#L27-L37 | train |
pystorm/pystorm | pystorm/serializers/json_serializer.py | JSONSerializer.read_message | def read_message(self):
"""The Storm multilang protocol consists of JSON messages followed by
a newline and "end\n".
All of Storm's messages (for either bolts or spouts) should be of the
form::
'<command or task_id form prior emit>\\nend\\n'
Command example, an incoming Tuple to a bolt::
'{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n'
Command example for a spout to emit its next Tuple::
'{"command": "next"}\\nend\\n'
Example, the task IDs a prior emit was sent to::
'[12, 22, 24]\\nend\\n'
The edge case of where we read ``''`` from ``input_stream`` indicating
EOF, usually means that communication with the supervisor has been
severed.
"""
msg = ""
num_blank_lines = 0
while True:
# readline will return trailing \n so that output is unambigious, we
# should only have line == '' if we're at EOF
with self._reader_lock:
line = self.input_stream.readline()
if line == "end\n":
break
elif line == "":
raise StormWentAwayError()
elif line == "\n":
num_blank_lines += 1
if num_blank_lines % 1000 == 0:
log.warn(
"While trying to read a command or pending task "
"ID, Storm has instead sent %s '\\n' messages.",
num_blank_lines,
)
continue
msg = "{}{}\n".format(msg, line[0:-1])
try:
return json.loads(msg)
except Exception:
log.error("JSON decode error for message: %r", msg, exc_info=True)
raise | python | def read_message(self):
"""The Storm multilang protocol consists of JSON messages followed by
a newline and "end\n".
All of Storm's messages (for either bolts or spouts) should be of the
form::
'<command or task_id form prior emit>\\nend\\n'
Command example, an incoming Tuple to a bolt::
'{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n'
Command example for a spout to emit its next Tuple::
'{"command": "next"}\\nend\\n'
Example, the task IDs a prior emit was sent to::
'[12, 22, 24]\\nend\\n'
The edge case of where we read ``''`` from ``input_stream`` indicating
EOF, usually means that communication with the supervisor has been
severed.
"""
msg = ""
num_blank_lines = 0
while True:
# readline will return trailing \n so that output is unambigious, we
# should only have line == '' if we're at EOF
with self._reader_lock:
line = self.input_stream.readline()
if line == "end\n":
break
elif line == "":
raise StormWentAwayError()
elif line == "\n":
num_blank_lines += 1
if num_blank_lines % 1000 == 0:
log.warn(
"While trying to read a command or pending task "
"ID, Storm has instead sent %s '\\n' messages.",
num_blank_lines,
)
continue
msg = "{}{}\n".format(msg, line[0:-1])
try:
return json.loads(msg)
except Exception:
log.error("JSON decode error for message: %r", msg, exc_info=True)
raise | [
"def",
"read_message",
"(",
"self",
")",
":",
"msg",
"=",
"\"\"",
"num_blank_lines",
"=",
"0",
"while",
"True",
":",
"# readline will return trailing \\n so that output is unambigious, we",
"# should only have line == '' if we're at EOF",
"with",
"self",
".",
"_reader_lock",
... | The Storm multilang protocol consists of JSON messages followed by
a newline and "end\n".
All of Storm's messages (for either bolts or spouts) should be of the
form::
'<command or task_id form prior emit>\\nend\\n'
Command example, an incoming Tuple to a bolt::
'{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n'
Command example for a spout to emit its next Tuple::
'{"command": "next"}\\nend\\n'
Example, the task IDs a prior emit was sent to::
'[12, 22, 24]\\nend\\n'
The edge case of where we read ``''`` from ``input_stream`` indicating
EOF, usually means that communication with the supervisor has been
severed. | [
"The",
"Storm",
"multilang",
"protocol",
"consists",
"of",
"JSON",
"messages",
"followed",
"by",
"a",
"newline",
"and",
"end",
"\\",
"n",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/serializers/json_serializer.py#L39-L91 | train |
pystorm/pystorm | pystorm/serializers/json_serializer.py | JSONSerializer.serialize_dict | def serialize_dict(self, msg_dict):
"""Serialize to JSON a message dictionary."""
serialized = json.dumps(msg_dict, namedtuple_as_object=False)
if PY2:
serialized = serialized.decode("utf-8")
serialized = "{}\nend\n".format(serialized)
return serialized | python | def serialize_dict(self, msg_dict):
"""Serialize to JSON a message dictionary."""
serialized = json.dumps(msg_dict, namedtuple_as_object=False)
if PY2:
serialized = serialized.decode("utf-8")
serialized = "{}\nend\n".format(serialized)
return serialized | [
"def",
"serialize_dict",
"(",
"self",
",",
"msg_dict",
")",
":",
"serialized",
"=",
"json",
".",
"dumps",
"(",
"msg_dict",
",",
"namedtuple_as_object",
"=",
"False",
")",
"if",
"PY2",
":",
"serialized",
"=",
"serialized",
".",
"decode",
"(",
"\"utf-8\"",
"... | Serialize to JSON a message dictionary. | [
"Serialize",
"to",
"JSON",
"a",
"message",
"dictionary",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/serializers/json_serializer.py#L93-L99 | train |
pystorm/pystorm | pystorm/bolt.py | Bolt.read_tuple | def read_tuple(self):
"""Read a tuple from the pipe to Storm."""
cmd = self.read_command()
source = cmd["comp"]
stream = cmd["stream"]
values = cmd["tuple"]
val_type = self._source_tuple_types[source].get(stream)
return Tuple(
cmd["id"],
source,
stream,
cmd["task"],
tuple(values) if val_type is None else val_type(*values),
) | python | def read_tuple(self):
"""Read a tuple from the pipe to Storm."""
cmd = self.read_command()
source = cmd["comp"]
stream = cmd["stream"]
values = cmd["tuple"]
val_type = self._source_tuple_types[source].get(stream)
return Tuple(
cmd["id"],
source,
stream,
cmd["task"],
tuple(values) if val_type is None else val_type(*values),
) | [
"def",
"read_tuple",
"(",
"self",
")",
":",
"cmd",
"=",
"self",
".",
"read_command",
"(",
")",
"source",
"=",
"cmd",
"[",
"\"comp\"",
"]",
"stream",
"=",
"cmd",
"[",
"\"stream\"",
"]",
"values",
"=",
"cmd",
"[",
"\"tuple\"",
"]",
"val_type",
"=",
"se... | Read a tuple from the pipe to Storm. | [
"Read",
"a",
"tuple",
"from",
"the",
"pipe",
"to",
"Storm",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L90-L103 | train |
pystorm/pystorm | pystorm/bolt.py | Bolt.emit | def emit(
self, tup, stream=None, anchors=None, direct_task=None, need_task_ids=False
):
"""Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or :class:`pystorm.component.Tuple`
instances) which the emitted Tuples should be anchored
to. If ``auto_anchor`` is set to ``True`` and
you have not specified ``anchors``, ``anchors`` will be
set to the incoming/most recent Tuple ID(s).
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
if anchors is None:
anchors = self._current_tups if self.auto_anchor else []
anchors = [a.id if isinstance(a, Tuple) else a for a in anchors]
return super(Bolt, self).emit(
tup,
stream=stream,
anchors=anchors,
direct_task=direct_task,
need_task_ids=need_task_ids,
) | python | def emit(
self, tup, stream=None, anchors=None, direct_task=None, need_task_ids=False
):
"""Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or :class:`pystorm.component.Tuple`
instances) which the emitted Tuples should be anchored
to. If ``auto_anchor`` is set to ``True`` and
you have not specified ``anchors``, ``anchors`` will be
set to the incoming/most recent Tuple ID(s).
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``.
"""
if anchors is None:
anchors = self._current_tups if self.auto_anchor else []
anchors = [a.id if isinstance(a, Tuple) else a for a in anchors]
return super(Bolt, self).emit(
tup,
stream=stream,
anchors=anchors,
direct_task=direct_task,
need_task_ids=need_task_ids,
) | [
"def",
"emit",
"(",
"self",
",",
"tup",
",",
"stream",
"=",
"None",
",",
"anchors",
"=",
"None",
",",
"direct_task",
"=",
"None",
",",
"need_task_ids",
"=",
"False",
")",
":",
"if",
"anchors",
"is",
"None",
":",
"anchors",
"=",
"self",
".",
"_current... | Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`pystorm.component.Tuple`
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
:param anchors: IDs the Tuples (or :class:`pystorm.component.Tuple`
instances) which the emitted Tuples should be anchored
to. If ``auto_anchor`` is set to ``True`` and
you have not specified ``anchors``, ``anchors`` will be
set to the incoming/most recent Tuple ID(s).
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
the Tuple was emitted (default: ``False``).
:type need_task_ids: bool
:returns: ``None``, unless ``need_task_ids=True``, in which case it will
be a ``list`` of task IDs that the Tuple was sent to if. Note
that when specifying direct_task, this will be equal to
``[direct_task]``. | [
"Emit",
"a",
"new",
"Tuple",
"to",
"a",
"stream",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L136-L174 | train |
pystorm/pystorm | pystorm/bolt.py | Bolt.ack | def ack(self, tup):
"""Indicate that processing of a Tuple has succeeded.
:param tup: the Tuple to acknowledge.
:type tup: :class:`str` or :class:`pystorm.component.Tuple`
"""
tup_id = tup.id if isinstance(tup, Tuple) else tup
self.send_message({"command": "ack", "id": tup_id}) | python | def ack(self, tup):
"""Indicate that processing of a Tuple has succeeded.
:param tup: the Tuple to acknowledge.
:type tup: :class:`str` or :class:`pystorm.component.Tuple`
"""
tup_id = tup.id if isinstance(tup, Tuple) else tup
self.send_message({"command": "ack", "id": tup_id}) | [
"def",
"ack",
"(",
"self",
",",
"tup",
")",
":",
"tup_id",
"=",
"tup",
".",
"id",
"if",
"isinstance",
"(",
"tup",
",",
"Tuple",
")",
"else",
"tup",
"self",
".",
"send_message",
"(",
"{",
"\"command\"",
":",
"\"ack\"",
",",
"\"id\"",
":",
"tup_id",
... | Indicate that processing of a Tuple has succeeded.
:param tup: the Tuple to acknowledge.
:type tup: :class:`str` or :class:`pystorm.component.Tuple` | [
"Indicate",
"that",
"processing",
"of",
"a",
"Tuple",
"has",
"succeeded",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L176-L183 | train |
pystorm/pystorm | pystorm/bolt.py | Bolt.fail | def fail(self, tup):
"""Indicate that processing of a Tuple has failed.
:param tup: the Tuple to fail (its ``id`` if ``str``).
:type tup: :class:`str` or :class:`pystorm.component.Tuple`
"""
tup_id = tup.id if isinstance(tup, Tuple) else tup
self.send_message({"command": "fail", "id": tup_id}) | python | def fail(self, tup):
"""Indicate that processing of a Tuple has failed.
:param tup: the Tuple to fail (its ``id`` if ``str``).
:type tup: :class:`str` or :class:`pystorm.component.Tuple`
"""
tup_id = tup.id if isinstance(tup, Tuple) else tup
self.send_message({"command": "fail", "id": tup_id}) | [
"def",
"fail",
"(",
"self",
",",
"tup",
")",
":",
"tup_id",
"=",
"tup",
".",
"id",
"if",
"isinstance",
"(",
"tup",
",",
"Tuple",
")",
"else",
"tup",
"self",
".",
"send_message",
"(",
"{",
"\"command\"",
":",
"\"fail\"",
",",
"\"id\"",
":",
"tup_id",
... | Indicate that processing of a Tuple has failed.
:param tup: the Tuple to fail (its ``id`` if ``str``).
:type tup: :class:`str` or :class:`pystorm.component.Tuple` | [
"Indicate",
"that",
"processing",
"of",
"a",
"Tuple",
"has",
"failed",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L185-L192 | train |
pystorm/pystorm | pystorm/bolt.py | Bolt._run | def _run(self):
"""The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested.
"""
tup = self.read_tuple()
self._current_tups = [tup]
if self.is_heartbeat(tup):
self.send_message({"command": "sync"})
elif self.is_tick(tup):
self.process_tick(tup)
if self.auto_ack:
self.ack(tup)
else:
self.process(tup)
if self.auto_ack:
self.ack(tup)
# Reset _current_tups so that we don't accidentally fail the wrong
# Tuples if a successive call to read_tuple fails.
# This is not done in `finally` clause because we want the current
# Tuples to fail when there is an exception.
self._current_tups = [] | python | def _run(self):
"""The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested.
"""
tup = self.read_tuple()
self._current_tups = [tup]
if self.is_heartbeat(tup):
self.send_message({"command": "sync"})
elif self.is_tick(tup):
self.process_tick(tup)
if self.auto_ack:
self.ack(tup)
else:
self.process(tup)
if self.auto_ack:
self.ack(tup)
# Reset _current_tups so that we don't accidentally fail the wrong
# Tuples if a successive call to read_tuple fails.
# This is not done in `finally` clause because we want the current
# Tuples to fail when there is an exception.
self._current_tups = [] | [
"def",
"_run",
"(",
"self",
")",
":",
"tup",
"=",
"self",
".",
"read_tuple",
"(",
")",
"self",
".",
"_current_tups",
"=",
"[",
"tup",
"]",
"if",
"self",
".",
"is_heartbeat",
"(",
"tup",
")",
":",
"self",
".",
"send_message",
"(",
"{",
"\"command\"",
... | The inside of ``run``'s infinite loop.
Separated out so it can be properly unit tested. | [
"The",
"inside",
"of",
"run",
"s",
"infinite",
"loop",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L194-L215 | train |
pystorm/pystorm | pystorm/bolt.py | Bolt._handle_run_exception | def _handle_run_exception(self, exc):
"""Process an exception encountered while running the ``run()`` loop.
Called right before program exits.
"""
if len(self._current_tups) == 1:
tup = self._current_tups[0]
self.raise_exception(exc, tup)
if self.auto_fail:
self.fail(tup) | python | def _handle_run_exception(self, exc):
"""Process an exception encountered while running the ``run()`` loop.
Called right before program exits.
"""
if len(self._current_tups) == 1:
tup = self._current_tups[0]
self.raise_exception(exc, tup)
if self.auto_fail:
self.fail(tup) | [
"def",
"_handle_run_exception",
"(",
"self",
",",
"exc",
")",
":",
"if",
"len",
"(",
"self",
".",
"_current_tups",
")",
"==",
"1",
":",
"tup",
"=",
"self",
".",
"_current_tups",
"[",
"0",
"]",
"self",
".",
"raise_exception",
"(",
"exc",
",",
"tup",
"... | Process an exception encountered while running the ``run()`` loop.
Called right before program exits. | [
"Process",
"an",
"exception",
"encountered",
"while",
"running",
"the",
"run",
"()",
"loop",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L217-L226 | train |
pystorm/pystorm | pystorm/bolt.py | BatchingBolt.emit | def emit(self, tup, **kwargs):
"""Modified emit that will not return task IDs after emitting.
See :class:`pystorm.component.Bolt` for more information.
:returns: ``None``.
"""
kwargs["need_task_ids"] = False
return super(BatchingBolt, self).emit(tup, **kwargs) | python | def emit(self, tup, **kwargs):
"""Modified emit that will not return task IDs after emitting.
See :class:`pystorm.component.Bolt` for more information.
:returns: ``None``.
"""
kwargs["need_task_ids"] = False
return super(BatchingBolt, self).emit(tup, **kwargs) | [
"def",
"emit",
"(",
"self",
",",
"tup",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"\"need_task_ids\"",
"]",
"=",
"False",
"return",
"super",
"(",
"BatchingBolt",
",",
"self",
")",
".",
"emit",
"(",
"tup",
",",
"*",
"*",
"kwargs",
")"
] | Modified emit that will not return task IDs after emitting.
See :class:`pystorm.component.Bolt` for more information.
:returns: ``None``. | [
"Modified",
"emit",
"that",
"will",
"not",
"return",
"task",
"IDs",
"after",
"emitting",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L316-L324 | train |
pystorm/pystorm | pystorm/bolt.py | BatchingBolt.process_tick | def process_tick(self, tick_tup):
"""Increment tick counter, and call ``process_batch`` for all current
batches if tick counter exceeds ``ticks_between_batches``.
See :class:`pystorm.component.Bolt` for more information.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
"""
self._tick_counter += 1
# ACK tick Tuple immediately, since it's just responsible for counter
self.ack(tick_tup)
if self._tick_counter > self.ticks_between_batches and self._batches:
self.process_batches()
self._tick_counter = 0 | python | def process_tick(self, tick_tup):
"""Increment tick counter, and call ``process_batch`` for all current
batches if tick counter exceeds ``ticks_between_batches``.
See :class:`pystorm.component.Bolt` for more information.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
"""
self._tick_counter += 1
# ACK tick Tuple immediately, since it's just responsible for counter
self.ack(tick_tup)
if self._tick_counter > self.ticks_between_batches and self._batches:
self.process_batches()
self._tick_counter = 0 | [
"def",
"process_tick",
"(",
"self",
",",
"tick_tup",
")",
":",
"self",
".",
"_tick_counter",
"+=",
"1",
"# ACK tick Tuple immediately, since it's just responsible for counter",
"self",
".",
"ack",
"(",
"tick_tup",
")",
"if",
"self",
".",
"_tick_counter",
">",
"self"... | Increment tick counter, and call ``process_batch`` for all current
batches if tick counter exceeds ``ticks_between_batches``.
See :class:`pystorm.component.Bolt` for more information.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``. | [
"Increment",
"tick",
"counter",
"and",
"call",
"process_batch",
"for",
"all",
"current",
"batches",
"if",
"tick",
"counter",
"exceeds",
"ticks_between_batches",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L326-L341 | train |
pystorm/pystorm | pystorm/bolt.py | BatchingBolt.process_batches | def process_batches(self):
"""Iterate through all batches, call process_batch on them, and ack.
Separated out for the rare instances when we want to subclass
BatchingBolt and customize what mechanism causes batches to be
processed.
"""
for key, batch in iteritems(self._batches):
self._current_tups = batch
self._current_key = key
self.process_batch(key, batch)
if self.auto_ack:
for tup in batch:
self.ack(tup)
# Set current batch to [] so that we know it was acked if a
# later batch raises an exception
self._current_key = None
self._batches[key] = []
self._batches = defaultdict(list) | python | def process_batches(self):
"""Iterate through all batches, call process_batch on them, and ack.
Separated out for the rare instances when we want to subclass
BatchingBolt and customize what mechanism causes batches to be
processed.
"""
for key, batch in iteritems(self._batches):
self._current_tups = batch
self._current_key = key
self.process_batch(key, batch)
if self.auto_ack:
for tup in batch:
self.ack(tup)
# Set current batch to [] so that we know it was acked if a
# later batch raises an exception
self._current_key = None
self._batches[key] = []
self._batches = defaultdict(list) | [
"def",
"process_batches",
"(",
"self",
")",
":",
"for",
"key",
",",
"batch",
"in",
"iteritems",
"(",
"self",
".",
"_batches",
")",
":",
"self",
".",
"_current_tups",
"=",
"batch",
"self",
".",
"_current_key",
"=",
"key",
"self",
".",
"process_batch",
"("... | Iterate through all batches, call process_batch on them, and ack.
Separated out for the rare instances when we want to subclass
BatchingBolt and customize what mechanism causes batches to be
processed. | [
"Iterate",
"through",
"all",
"batches",
"call",
"process_batch",
"on",
"them",
"and",
"ack",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L343-L361 | train |
pystorm/pystorm | pystorm/bolt.py | BatchingBolt.process | def process(self, tup):
"""Group non-tick Tuples into batches by ``group_key``.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
"""
# Append latest Tuple to batches
group_key = self.group_key(tup)
self._batches[group_key].append(tup) | python | def process(self, tup):
"""Group non-tick Tuples into batches by ``group_key``.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``.
"""
# Append latest Tuple to batches
group_key = self.group_key(tup)
self._batches[group_key].append(tup) | [
"def",
"process",
"(",
"self",
",",
"tup",
")",
":",
"# Append latest Tuple to batches",
"group_key",
"=",
"self",
".",
"group_key",
"(",
"tup",
")",
"self",
".",
"_batches",
"[",
"group_key",
"]",
".",
"append",
"(",
"tup",
")"
] | Group non-tick Tuples into batches by ``group_key``.
.. warning::
This method should **not** be overriden. If you want to tweak
how Tuples are grouped into batches, override ``group_key``. | [
"Group",
"non",
"-",
"tick",
"Tuples",
"into",
"batches",
"by",
"group_key",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L363-L372 | train |
pystorm/pystorm | pystorm/bolt.py | BatchingBolt._handle_run_exception | def _handle_run_exception(self, exc):
"""Process an exception encountered while running the ``run()`` loop.
Called right before program exits.
"""
self.raise_exception(exc, self._current_tups)
if self.auto_fail:
failed = set()
for key, batch in iteritems(self._batches):
# Only wipe out batches other than current for exit_on_exception
if self.exit_on_exception or key == self._current_key:
for tup in batch:
self.fail(tup)
failed.add(tup.id)
# Fail current batch or tick Tuple if we have one
for tup in self._current_tups:
if tup.id not in failed:
self.fail(tup)
# Reset current batch info
self._batches[self._current_key] = []
self._current_key = None | python | def _handle_run_exception(self, exc):
"""Process an exception encountered while running the ``run()`` loop.
Called right before program exits.
"""
self.raise_exception(exc, self._current_tups)
if self.auto_fail:
failed = set()
for key, batch in iteritems(self._batches):
# Only wipe out batches other than current for exit_on_exception
if self.exit_on_exception or key == self._current_key:
for tup in batch:
self.fail(tup)
failed.add(tup.id)
# Fail current batch or tick Tuple if we have one
for tup in self._current_tups:
if tup.id not in failed:
self.fail(tup)
# Reset current batch info
self._batches[self._current_key] = []
self._current_key = None | [
"def",
"_handle_run_exception",
"(",
"self",
",",
"exc",
")",
":",
"self",
".",
"raise_exception",
"(",
"exc",
",",
"self",
".",
"_current_tups",
")",
"if",
"self",
".",
"auto_fail",
":",
"failed",
"=",
"set",
"(",
")",
"for",
"key",
",",
"batch",
"in"... | Process an exception encountered while running the ``run()`` loop.
Called right before program exits. | [
"Process",
"an",
"exception",
"encountered",
"while",
"running",
"the",
"run",
"()",
"loop",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L391-L414 | train |
pystorm/pystorm | pystorm/bolt.py | TicklessBatchingBolt._batch_entry_run | def _batch_entry_run(self):
"""The inside of ``_batch_entry``'s infinite loop.
Separated out so it can be properly unit tested.
"""
time.sleep(self.secs_between_batches)
with self._batch_lock:
self.process_batches() | python | def _batch_entry_run(self):
"""The inside of ``_batch_entry``'s infinite loop.
Separated out so it can be properly unit tested.
"""
time.sleep(self.secs_between_batches)
with self._batch_lock:
self.process_batches() | [
"def",
"_batch_entry_run",
"(",
"self",
")",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"secs_between_batches",
")",
"with",
"self",
".",
"_batch_lock",
":",
"self",
".",
"process_batches",
"(",
")"
] | The inside of ``_batch_entry``'s infinite loop.
Separated out so it can be properly unit tested. | [
"The",
"inside",
"of",
"_batch_entry",
"s",
"infinite",
"loop",
"."
] | 0f853e007c79e03cefdb4a0794423f84dce4c2f3 | https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/bolt.py#L496-L503 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.