repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
BlueBrain/hpcbench | hpcbench/toolbox/edsl.py | kwargsql._sequence_map | def _sequence_map(cls, seq, path):
"""Apply a kwargsql expression to every item of a sequence,
and returns it.
:param seq: the list to transform
:param path: kwargsql expression to apply to every elements of
the given sequence.
"""
if not any(path):
# There is no further kwargsql expression
return seq
result = []
for item in seq:
try:
result.append(cls.__resolve_path(item, path))
except (KeyError, IndexError):
pass
return result | python | def _sequence_map(cls, seq, path):
"""Apply a kwargsql expression to every item of a sequence,
and returns it.
:param seq: the list to transform
:param path: kwargsql expression to apply to every elements of
the given sequence.
"""
if not any(path):
# There is no further kwargsql expression
return seq
result = []
for item in seq:
try:
result.append(cls.__resolve_path(item, path))
except (KeyError, IndexError):
pass
return result | [
"def",
"_sequence_map",
"(",
"cls",
",",
"seq",
",",
"path",
")",
":",
"if",
"not",
"any",
"(",
"path",
")",
":",
"# There is no further kwargsql expression",
"return",
"seq",
"result",
"=",
"[",
"]",
"for",
"item",
"in",
"seq",
":",
"try",
":",
"result"... | Apply a kwargsql expression to every item of a sequence,
and returns it.
:param seq: the list to transform
:param path: kwargsql expression to apply to every elements of
the given sequence. | [
"Apply",
"a",
"kwargsql",
"expression",
"to",
"every",
"item",
"of",
"a",
"sequence",
"and",
"returns",
"it",
"."
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/edsl.py#L270-L287 | train | 53,600 |
PolyJIT/benchbuild | benchbuild/project.py | populate | def populate(projects_to_filter=None, group=None):
"""
Populate the list of projects that belong to this experiment.
Args:
projects_to_filter (list(Project)):
List of projects we want to assign to this experiment.
We intersect the list of projects with the list of supported
projects to get the list of projects that belong to this
experiment.
group (list(str)):
In addition to the project filter, we provide a way to filter
whole groups.
"""
if projects_to_filter is None:
projects_to_filter = []
import benchbuild.projects as all_projects
all_projects.discover()
prjs = ProjectRegistry.projects
if projects_to_filter:
prjs = {}
for filter_project in set(projects_to_filter):
try:
prjs.update({
x: y
for x, y in ProjectRegistry.projects.items(
prefix=filter_project)
})
except KeyError:
pass
if group:
groupkeys = set(group)
prjs = {
name: cls
for name, cls in prjs.items() if cls.GROUP in groupkeys
}
return {
x: prjs[x]
for x in prjs if prjs[x].DOMAIN != "debug" or x in projects_to_filter
} | python | def populate(projects_to_filter=None, group=None):
"""
Populate the list of projects that belong to this experiment.
Args:
projects_to_filter (list(Project)):
List of projects we want to assign to this experiment.
We intersect the list of projects with the list of supported
projects to get the list of projects that belong to this
experiment.
group (list(str)):
In addition to the project filter, we provide a way to filter
whole groups.
"""
if projects_to_filter is None:
projects_to_filter = []
import benchbuild.projects as all_projects
all_projects.discover()
prjs = ProjectRegistry.projects
if projects_to_filter:
prjs = {}
for filter_project in set(projects_to_filter):
try:
prjs.update({
x: y
for x, y in ProjectRegistry.projects.items(
prefix=filter_project)
})
except KeyError:
pass
if group:
groupkeys = set(group)
prjs = {
name: cls
for name, cls in prjs.items() if cls.GROUP in groupkeys
}
return {
x: prjs[x]
for x in prjs if prjs[x].DOMAIN != "debug" or x in projects_to_filter
} | [
"def",
"populate",
"(",
"projects_to_filter",
"=",
"None",
",",
"group",
"=",
"None",
")",
":",
"if",
"projects_to_filter",
"is",
"None",
":",
"projects_to_filter",
"=",
"[",
"]",
"import",
"benchbuild",
".",
"projects",
"as",
"all_projects",
"all_projects",
"... | Populate the list of projects that belong to this experiment.
Args:
projects_to_filter (list(Project)):
List of projects we want to assign to this experiment.
We intersect the list of projects with the list of supported
projects to get the list of projects that belong to this
experiment.
group (list(str)):
In addition to the project filter, we provide a way to filter
whole groups. | [
"Populate",
"the",
"list",
"of",
"projects",
"that",
"belong",
"to",
"this",
"experiment",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/project.py#L342-L385 | train | 53,601 |
PolyJIT/benchbuild | benchbuild/utils/compiler.py | cc | def cc(project, detect_project=False):
"""
Return a clang that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command.
"""
from benchbuild.utils import cmd
cc_name = str(CFG["compiler"]["c"])
wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project)
return cmd["./{}".format(cc_name)] | python | def cc(project, detect_project=False):
"""
Return a clang that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command.
"""
from benchbuild.utils import cmd
cc_name = str(CFG["compiler"]["c"])
wrap_cc(cc_name, compiler(cc_name), project, detect_project=detect_project)
return cmd["./{}".format(cc_name)] | [
"def",
"cc",
"(",
"project",
",",
"detect_project",
"=",
"False",
")",
":",
"from",
"benchbuild",
".",
"utils",
"import",
"cmd",
"cc_name",
"=",
"str",
"(",
"CFG",
"[",
"\"compiler\"",
"]",
"[",
"\"c\"",
"]",
")",
"wrap_cc",
"(",
"cc_name",
",",
"compi... | Return a clang that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command. | [
"Return",
"a",
"clang",
"that",
"hides",
"CFLAGS",
"and",
"LDFLAGS",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L27-L49 | train | 53,602 |
PolyJIT/benchbuild | benchbuild/utils/compiler.py | cxx | def cxx(project, detect_project=False):
"""
Return a clang++ that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command.
"""
from benchbuild.utils import cmd
cxx_name = str(CFG["compiler"]["cxx"])
wrap_cc(
cxx_name, compiler(cxx_name), project, detect_project=detect_project)
return cmd["./{name}".format(name=cxx_name)] | python | def cxx(project, detect_project=False):
"""
Return a clang++ that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command.
"""
from benchbuild.utils import cmd
cxx_name = str(CFG["compiler"]["cxx"])
wrap_cc(
cxx_name, compiler(cxx_name), project, detect_project=detect_project)
return cmd["./{name}".format(name=cxx_name)] | [
"def",
"cxx",
"(",
"project",
",",
"detect_project",
"=",
"False",
")",
":",
"from",
"benchbuild",
".",
"utils",
"import",
"cmd",
"cxx_name",
"=",
"str",
"(",
"CFG",
"[",
"\"compiler\"",
"]",
"[",
"\"cxx\"",
"]",
")",
"wrap_cc",
"(",
"cxx_name",
",",
"... | Return a clang++ that hides CFLAGS and LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
cflags: The CFLAGS we want to hide.
ldflags: The LDFLAGS we want to hide.
func (optional): A function that will be pickled alongside the compiler.
It will be called before the actual compilation took place. This
way you can intercept the compilation process with arbitrary python
code.
Returns (benchbuild.utils.cmd):
Path to the new clang command. | [
"Return",
"a",
"clang",
"++",
"that",
"hides",
"CFLAGS",
"and",
"LDFLAGS",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L52-L75 | train | 53,603 |
PolyJIT/benchbuild | benchbuild/utils/compiler.py | compiler | def compiler(name):
"""
Get a usable clang++ plumbum command.
This searches for a usable clang++ in the llvm binary path
Returns:
plumbum Command that executes clang++
"""
pinfo = __get_paths()
_compiler = local[name]
_compiler = _compiler.setenv(
PATH=pinfo["path"], LD_LIBRARY_PATH=pinfo["ld_library_path"])
return _compiler | python | def compiler(name):
"""
Get a usable clang++ plumbum command.
This searches for a usable clang++ in the llvm binary path
Returns:
plumbum Command that executes clang++
"""
pinfo = __get_paths()
_compiler = local[name]
_compiler = _compiler.setenv(
PATH=pinfo["path"], LD_LIBRARY_PATH=pinfo["ld_library_path"])
return _compiler | [
"def",
"compiler",
"(",
"name",
")",
":",
"pinfo",
"=",
"__get_paths",
"(",
")",
"_compiler",
"=",
"local",
"[",
"name",
"]",
"_compiler",
"=",
"_compiler",
".",
"setenv",
"(",
"PATH",
"=",
"pinfo",
"[",
"\"path\"",
"]",
",",
"LD_LIBRARY_PATH",
"=",
"p... | Get a usable clang++ plumbum command.
This searches for a usable clang++ in the llvm binary path
Returns:
plumbum Command that executes clang++ | [
"Get",
"a",
"usable",
"clang",
"++",
"plumbum",
"command",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/compiler.py#L98-L111 | train | 53,604 |
PolyJIT/benchbuild | benchbuild/utils/wrapping.py | strip_path_prefix | def strip_path_prefix(ipath, prefix):
"""
Strip prefix from path.
Args:
ipath: input path
prefix: the prefix to remove, if it is found in :ipath:
Examples:
>>> strip_path_prefix("/foo/bar", "/bar")
'/foo/bar'
>>> strip_path_prefix("/foo/bar", "/")
'foo/bar'
>>> strip_path_prefix("/foo/bar", "/foo")
'/bar'
>>> strip_path_prefix("/foo/bar", "None")
'/foo/bar'
"""
if prefix is None:
return ipath
return ipath[len(prefix):] if ipath.startswith(prefix) else ipath | python | def strip_path_prefix(ipath, prefix):
"""
Strip prefix from path.
Args:
ipath: input path
prefix: the prefix to remove, if it is found in :ipath:
Examples:
>>> strip_path_prefix("/foo/bar", "/bar")
'/foo/bar'
>>> strip_path_prefix("/foo/bar", "/")
'foo/bar'
>>> strip_path_prefix("/foo/bar", "/foo")
'/bar'
>>> strip_path_prefix("/foo/bar", "None")
'/foo/bar'
"""
if prefix is None:
return ipath
return ipath[len(prefix):] if ipath.startswith(prefix) else ipath | [
"def",
"strip_path_prefix",
"(",
"ipath",
",",
"prefix",
")",
":",
"if",
"prefix",
"is",
"None",
":",
"return",
"ipath",
"return",
"ipath",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"if",
"ipath",
".",
"startswith",
"(",
"prefix",
")",
"else",
"ipath"
... | Strip prefix from path.
Args:
ipath: input path
prefix: the prefix to remove, if it is found in :ipath:
Examples:
>>> strip_path_prefix("/foo/bar", "/bar")
'/foo/bar'
>>> strip_path_prefix("/foo/bar", "/")
'foo/bar'
>>> strip_path_prefix("/foo/bar", "/foo")
'/bar'
>>> strip_path_prefix("/foo/bar", "None")
'/foo/bar' | [
"Strip",
"prefix",
"from",
"path",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L43-L65 | train | 53,605 |
PolyJIT/benchbuild | benchbuild/utils/wrapping.py | unpickle | def unpickle(pickle_file):
"""Unpickle a python object from the given path."""
pickle = None
with open(pickle_file, "rb") as pickle_f:
pickle = dill.load(pickle_f)
if not pickle:
LOG.error("Could not load python object from file")
return pickle | python | def unpickle(pickle_file):
"""Unpickle a python object from the given path."""
pickle = None
with open(pickle_file, "rb") as pickle_f:
pickle = dill.load(pickle_f)
if not pickle:
LOG.error("Could not load python object from file")
return pickle | [
"def",
"unpickle",
"(",
"pickle_file",
")",
":",
"pickle",
"=",
"None",
"with",
"open",
"(",
"pickle_file",
",",
"\"rb\"",
")",
"as",
"pickle_f",
":",
"pickle",
"=",
"dill",
".",
"load",
"(",
"pickle_f",
")",
"if",
"not",
"pickle",
":",
"LOG",
".",
"... | Unpickle a python object from the given path. | [
"Unpickle",
"a",
"python",
"object",
"from",
"the",
"given",
"path",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L68-L75 | train | 53,606 |
PolyJIT/benchbuild | benchbuild/utils/wrapping.py | wrap_cc | def wrap_cc(filepath,
compiler,
project,
python=sys.executable,
detect_project=False):
"""
Substitute a compiler with a script that hides CFLAGS & LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
filepath (str): Path to the wrapper script.
compiler (benchbuild.utils.cmd):
Real compiler command we should call in the script.
project (benchbuild.project.Project):
The project this compiler will be for.
python (str): Path to the python interpreter we should use.
detect_project: Should we enable project detection or not.
Returns (benchbuild.utils.cmd):
Command of the new compiler we can call.
"""
env = __create_jinja_env()
template = env.get_template('run_compiler.py.inc')
cc_fname = local.path(filepath).with_suffix(".benchbuild.cc", depth=0)
cc_f = persist(compiler, filename=cc_fname)
project_file = persist(project, suffix=".project")
with open(filepath, 'w') as wrapper:
wrapper.write(
template.render(
cc_f=cc_f,
project_file=project_file,
python=python,
detect_project=detect_project))
chmod("+x", filepath)
LOG.debug("Placed wrapper in: %s for compiler %s", local.path(filepath),
str(compiler))
LOG.debug("Placed project in: %s", local.path(project_file))
LOG.debug("Placed compiler command in: %s", local.path(cc_f))
return local[filepath] | python | def wrap_cc(filepath,
compiler,
project,
python=sys.executable,
detect_project=False):
"""
Substitute a compiler with a script that hides CFLAGS & LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
filepath (str): Path to the wrapper script.
compiler (benchbuild.utils.cmd):
Real compiler command we should call in the script.
project (benchbuild.project.Project):
The project this compiler will be for.
python (str): Path to the python interpreter we should use.
detect_project: Should we enable project detection or not.
Returns (benchbuild.utils.cmd):
Command of the new compiler we can call.
"""
env = __create_jinja_env()
template = env.get_template('run_compiler.py.inc')
cc_fname = local.path(filepath).with_suffix(".benchbuild.cc", depth=0)
cc_f = persist(compiler, filename=cc_fname)
project_file = persist(project, suffix=".project")
with open(filepath, 'w') as wrapper:
wrapper.write(
template.render(
cc_f=cc_f,
project_file=project_file,
python=python,
detect_project=detect_project))
chmod("+x", filepath)
LOG.debug("Placed wrapper in: %s for compiler %s", local.path(filepath),
str(compiler))
LOG.debug("Placed project in: %s", local.path(project_file))
LOG.debug("Placed compiler command in: %s", local.path(cc_f))
return local[filepath] | [
"def",
"wrap_cc",
"(",
"filepath",
",",
"compiler",
",",
"project",
",",
"python",
"=",
"sys",
".",
"executable",
",",
"detect_project",
"=",
"False",
")",
":",
"env",
"=",
"__create_jinja_env",
"(",
")",
"template",
"=",
"env",
".",
"get_template",
"(",
... | Substitute a compiler with a script that hides CFLAGS & LDFLAGS.
This will generate a wrapper script in the current directory
and return a complete plumbum command to it.
Args:
filepath (str): Path to the wrapper script.
compiler (benchbuild.utils.cmd):
Real compiler command we should call in the script.
project (benchbuild.project.Project):
The project this compiler will be for.
python (str): Path to the python interpreter we should use.
detect_project: Should we enable project detection or not.
Returns (benchbuild.utils.cmd):
Command of the new compiler we can call. | [
"Substitute",
"a",
"compiler",
"with",
"a",
"script",
"that",
"hides",
"CFLAGS",
"&",
"LDFLAGS",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L200-L244 | train | 53,607 |
PolyJIT/benchbuild | benchbuild/utils/wrapping.py | persist | def persist(id_obj, filename=None, suffix=None):
"""Persist an object in the filesystem.
This will generate a pickled version of the given obj in the filename path.
Objects shall provide an id() method to be able to use this persistence API.
If not, we will use the id() builtin of python to generate an identifier
for you.
The file will be created, if it does not exist.
If the file already exists, we will overwrite it.
Args:
id_obj (Any): An identifiable object you want to persist in the
filesystem.
"""
if suffix is None:
suffix = ".pickle"
if hasattr(id_obj, 'id'):
ident = id_obj.id
else:
ident = str(id(id_obj))
if filename is None:
filename = "{obj_id}{suffix}".format(obj_id=ident, suffix=suffix)
with open(filename, 'wb') as obj_file:
dill.dump(id_obj, obj_file)
return os.path.abspath(filename) | python | def persist(id_obj, filename=None, suffix=None):
"""Persist an object in the filesystem.
This will generate a pickled version of the given obj in the filename path.
Objects shall provide an id() method to be able to use this persistence API.
If not, we will use the id() builtin of python to generate an identifier
for you.
The file will be created, if it does not exist.
If the file already exists, we will overwrite it.
Args:
id_obj (Any): An identifiable object you want to persist in the
filesystem.
"""
if suffix is None:
suffix = ".pickle"
if hasattr(id_obj, 'id'):
ident = id_obj.id
else:
ident = str(id(id_obj))
if filename is None:
filename = "{obj_id}{suffix}".format(obj_id=ident, suffix=suffix)
with open(filename, 'wb') as obj_file:
dill.dump(id_obj, obj_file)
return os.path.abspath(filename) | [
"def",
"persist",
"(",
"id_obj",
",",
"filename",
"=",
"None",
",",
"suffix",
"=",
"None",
")",
":",
"if",
"suffix",
"is",
"None",
":",
"suffix",
"=",
"\".pickle\"",
"if",
"hasattr",
"(",
"id_obj",
",",
"'id'",
")",
":",
"ident",
"=",
"id_obj",
".",
... | Persist an object in the filesystem.
This will generate a pickled version of the given obj in the filename path.
Objects shall provide an id() method to be able to use this persistence API.
If not, we will use the id() builtin of python to generate an identifier
for you.
The file will be created, if it does not exist.
If the file already exists, we will overwrite it.
Args:
id_obj (Any): An identifiable object you want to persist in the
filesystem. | [
"Persist",
"an",
"object",
"in",
"the",
"filesystem",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L247-L274 | train | 53,608 |
PolyJIT/benchbuild | benchbuild/utils/wrapping.py | load | def load(filename):
"""Load a pickled obj from the filesystem.
You better know what you expect from the given pickle, because we don't check it.
Args:
filename (str): The filename we load the object from.
Returns:
The object we were able to unpickle, else None.
"""
if not os.path.exists(filename):
LOG.error("load object - File '%s' does not exist.", filename)
return None
obj = None
with open(filename, 'rb') as obj_file:
obj = dill.load(obj_file)
return obj | python | def load(filename):
"""Load a pickled obj from the filesystem.
You better know what you expect from the given pickle, because we don't check it.
Args:
filename (str): The filename we load the object from.
Returns:
The object we were able to unpickle, else None.
"""
if not os.path.exists(filename):
LOG.error("load object - File '%s' does not exist.", filename)
return None
obj = None
with open(filename, 'rb') as obj_file:
obj = dill.load(obj_file)
return obj | [
"def",
"load",
"(",
"filename",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"LOG",
".",
"error",
"(",
"\"load object - File '%s' does not exist.\"",
",",
"filename",
")",
"return",
"None",
"obj",
"=",
"None",
"with",... | Load a pickled obj from the filesystem.
You better know what you expect from the given pickle, because we don't check it.
Args:
filename (str): The filename we load the object from.
Returns:
The object we were able to unpickle, else None. | [
"Load",
"a",
"pickled",
"obj",
"from",
"the",
"filesystem",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/wrapping.py#L277-L295 | train | 53,609 |
sci-bots/svg-model | svg_model/connections.py | extract_adjacent_shapes | def extract_adjacent_shapes(df_shapes, shape_i_column, extend=.5):
'''
Generate list of connections between "adjacent" polygon shapes based on
geometrical "closeness".
Parameters
----------
df_shapes : pandas.DataFrame
Table of polygon shape vertices (one row per vertex).
Table rows with the same value in the :data:`shape_i_column` column
are grouped together as a polygon.
shape_i_column : str or list[str]
Column name(s) that identify the polygon each row belongs to.
extend : float, optional
Extend ``x``/``y`` coords by the specified number of absolute units
from the center point of each polygon.
Each polygon is stretched independently in the ``x`` and ``y`` direction.
In each direction, a polygon considered adjacent to all polygons that
are overlapped by the extended shape.
Returns
-------
pandas.DataFrame
Adjacency list as a frame containing the columns ``source`` and
``target``.
The ``source`` and ``target`` of each adjacency connection is ordered
such that the ``source`` is less than the ``target``.
'''
# Find corners of each solid shape outline.
# Extend x coords by abs units
df_scaled_x = extend_shapes(df_shapes, 'x', extend)
# Extend y coords by abs units
df_scaled_y = extend_shapes(df_shapes, 'y', extend)
df_corners = df_shapes.groupby(shape_i_column).agg({'x': ['min', 'max'],
'y': ['min', 'max']})
# Find adjacent electrodes
row_list = []
for shapeNumber in df_shapes[shape_i_column].drop_duplicates():
df_stretched = df_scaled_x[df_scaled_x[shape_i_column]
.isin([shapeNumber])]
xmin_x, xmax_x, ymin_x, ymax_x = (df_stretched.x.min(),
df_stretched.x.max(),
df_stretched.y.min(),
df_stretched.y.max())
df_stretched = df_scaled_y[df_scaled_y[shape_i_column]
.isin([shapeNumber])]
xmin_y, xmax_y, ymin_y, ymax_y = (df_stretched.x.min(),
df_stretched.x.max(),
df_stretched.y.min(),
df_stretched.y.max())
#Some conditions unnecessary if it is assumed that electrodes don't overlap
adjacent = df_corners[
((df_corners.x['min'] < xmax_x) & (df_corners.x['max'] >= xmax_x)
# Check in x stretched direction
|(df_corners.x['min'] < xmin_x) & (df_corners.x['max'] >= xmin_x))
# Check if y is within bounds
& (df_corners.y['min'] < ymax_x) & (df_corners.y['max'] > ymin_x) |
#maybe do ymax_x - df_corners.y['min'] > threshold &
# df_corners.y['max'] - ymin_x > threshold
((df_corners.y['min'] < ymax_y) & (df_corners.y['max'] >= ymax_y)
# Checks in y stretched direction
|(df_corners.y['min'] < ymin_y) & (df_corners.y['max'] >= ymin_y))
# Check if x in within bounds
& ((df_corners.x['min'] < xmax_y) & (df_corners.x['max'] > xmin_y))
].index.values
for shape in adjacent:
temp_dict = {}
reverse_dict = {}
temp_dict ['source'] = shapeNumber
reverse_dict['source'] = shape
temp_dict ['target'] = shape
reverse_dict['target'] = shapeNumber
if(reverse_dict not in row_list):
row_list.append(temp_dict)
df_connected = (pd.DataFrame(row_list)[['source', 'target']]
.sort_index(axis=1, ascending=True)
.sort_values(['source', 'target']))
return df_connected | python | def extract_adjacent_shapes(df_shapes, shape_i_column, extend=.5):
'''
Generate list of connections between "adjacent" polygon shapes based on
geometrical "closeness".
Parameters
----------
df_shapes : pandas.DataFrame
Table of polygon shape vertices (one row per vertex).
Table rows with the same value in the :data:`shape_i_column` column
are grouped together as a polygon.
shape_i_column : str or list[str]
Column name(s) that identify the polygon each row belongs to.
extend : float, optional
Extend ``x``/``y`` coords by the specified number of absolute units
from the center point of each polygon.
Each polygon is stretched independently in the ``x`` and ``y`` direction.
In each direction, a polygon considered adjacent to all polygons that
are overlapped by the extended shape.
Returns
-------
pandas.DataFrame
Adjacency list as a frame containing the columns ``source`` and
``target``.
The ``source`` and ``target`` of each adjacency connection is ordered
such that the ``source`` is less than the ``target``.
'''
# Find corners of each solid shape outline.
# Extend x coords by abs units
df_scaled_x = extend_shapes(df_shapes, 'x', extend)
# Extend y coords by abs units
df_scaled_y = extend_shapes(df_shapes, 'y', extend)
df_corners = df_shapes.groupby(shape_i_column).agg({'x': ['min', 'max'],
'y': ['min', 'max']})
# Find adjacent electrodes
row_list = []
for shapeNumber in df_shapes[shape_i_column].drop_duplicates():
df_stretched = df_scaled_x[df_scaled_x[shape_i_column]
.isin([shapeNumber])]
xmin_x, xmax_x, ymin_x, ymax_x = (df_stretched.x.min(),
df_stretched.x.max(),
df_stretched.y.min(),
df_stretched.y.max())
df_stretched = df_scaled_y[df_scaled_y[shape_i_column]
.isin([shapeNumber])]
xmin_y, xmax_y, ymin_y, ymax_y = (df_stretched.x.min(),
df_stretched.x.max(),
df_stretched.y.min(),
df_stretched.y.max())
#Some conditions unnecessary if it is assumed that electrodes don't overlap
adjacent = df_corners[
((df_corners.x['min'] < xmax_x) & (df_corners.x['max'] >= xmax_x)
# Check in x stretched direction
|(df_corners.x['min'] < xmin_x) & (df_corners.x['max'] >= xmin_x))
# Check if y is within bounds
& (df_corners.y['min'] < ymax_x) & (df_corners.y['max'] > ymin_x) |
#maybe do ymax_x - df_corners.y['min'] > threshold &
# df_corners.y['max'] - ymin_x > threshold
((df_corners.y['min'] < ymax_y) & (df_corners.y['max'] >= ymax_y)
# Checks in y stretched direction
|(df_corners.y['min'] < ymin_y) & (df_corners.y['max'] >= ymin_y))
# Check if x in within bounds
& ((df_corners.x['min'] < xmax_y) & (df_corners.x['max'] > xmin_y))
].index.values
for shape in adjacent:
temp_dict = {}
reverse_dict = {}
temp_dict ['source'] = shapeNumber
reverse_dict['source'] = shape
temp_dict ['target'] = shape
reverse_dict['target'] = shapeNumber
if(reverse_dict not in row_list):
row_list.append(temp_dict)
df_connected = (pd.DataFrame(row_list)[['source', 'target']]
.sort_index(axis=1, ascending=True)
.sort_values(['source', 'target']))
return df_connected | [
"def",
"extract_adjacent_shapes",
"(",
"df_shapes",
",",
"shape_i_column",
",",
"extend",
"=",
".5",
")",
":",
"# Find corners of each solid shape outline.",
"# Extend x coords by abs units",
"df_scaled_x",
"=",
"extend_shapes",
"(",
"df_shapes",
",",
"'x'",
",",
"extend"... | Generate list of connections between "adjacent" polygon shapes based on
geometrical "closeness".
Parameters
----------
df_shapes : pandas.DataFrame
Table of polygon shape vertices (one row per vertex).
Table rows with the same value in the :data:`shape_i_column` column
are grouped together as a polygon.
shape_i_column : str or list[str]
Column name(s) that identify the polygon each row belongs to.
extend : float, optional
Extend ``x``/``y`` coords by the specified number of absolute units
from the center point of each polygon.
Each polygon is stretched independently in the ``x`` and ``y`` direction.
In each direction, a polygon considered adjacent to all polygons that
are overlapped by the extended shape.
Returns
-------
pandas.DataFrame
Adjacency list as a frame containing the columns ``source`` and
``target``.
The ``source`` and ``target`` of each adjacency connection is ordered
such that the ``source`` is less than the ``target``. | [
"Generate",
"list",
"of",
"connections",
"between",
"adjacent",
"polygon",
"shapes",
"based",
"on",
"geometrical",
"closeness",
"."
] | 2d119650f995e62b29ce0b3151a23f3b957cb072 | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/connections.py#L30-L119 | train | 53,610 |
BlueBrain/hpcbench | hpcbench/export/es.py | ESExporter.es_client | def es_client(self):
"""Get Elasticsearch client
"""
es_conf = self.campaign.export.elasticsearch
return Elasticsearch(self.hosts, **es_conf.connection_params) | python | def es_client(self):
"""Get Elasticsearch client
"""
es_conf = self.campaign.export.elasticsearch
return Elasticsearch(self.hosts, **es_conf.connection_params) | [
"def",
"es_client",
"(",
"self",
")",
":",
"es_conf",
"=",
"self",
".",
"campaign",
".",
"export",
".",
"elasticsearch",
"return",
"Elasticsearch",
"(",
"self",
".",
"hosts",
",",
"*",
"*",
"es_conf",
".",
"connection_params",
")"
] | Get Elasticsearch client | [
"Get",
"Elasticsearch",
"client"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/export/es.py#L42-L46 | train | 53,611 |
BlueBrain/hpcbench | hpcbench/export/es.py | ESExporter.index_name | def index_name(self):
"""Get Elasticsearch index name associated to the campaign
"""
fmt = self.campaign.export.elasticsearch.index_name
fields = dict(date=self.report['date'])
return fmt.format(**fields).lower() | python | def index_name(self):
"""Get Elasticsearch index name associated to the campaign
"""
fmt = self.campaign.export.elasticsearch.index_name
fields = dict(date=self.report['date'])
return fmt.format(**fields).lower() | [
"def",
"index_name",
"(",
"self",
")",
":",
"fmt",
"=",
"self",
".",
"campaign",
".",
"export",
".",
"elasticsearch",
".",
"index_name",
"fields",
"=",
"dict",
"(",
"date",
"=",
"self",
".",
"report",
"[",
"'date'",
"]",
")",
"return",
"fmt",
".",
"f... | Get Elasticsearch index name associated to the campaign | [
"Get",
"Elasticsearch",
"index",
"name",
"associated",
"to",
"the",
"campaign"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/export/es.py#L55-L60 | train | 53,612 |
BlueBrain/hpcbench | hpcbench/export/es.py | ESExporter.remove_index | def remove_index(self):
"""Remove Elasticsearch index associated to the campaign"""
self.index_client.close(self.index_name)
self.index_client.delete(self.index_name) | python | def remove_index(self):
"""Remove Elasticsearch index associated to the campaign"""
self.index_client.close(self.index_name)
self.index_client.delete(self.index_name) | [
"def",
"remove_index",
"(",
"self",
")",
":",
"self",
".",
"index_client",
".",
"close",
"(",
"self",
".",
"index_name",
")",
"self",
".",
"index_client",
".",
"delete",
"(",
"self",
".",
"index_name",
")"
] | Remove Elasticsearch index associated to the campaign | [
"Remove",
"Elasticsearch",
"index",
"associated",
"to",
"the",
"campaign"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/export/es.py#L69-L72 | train | 53,613 |
BlueBrain/hpcbench | hpcbench/toolbox/buildinfo.py | extract_build_info | def extract_build_info(exe_path, elf_section=ELF_SECTION):
"""Extracts the build information from a given executable.
The build information is expected to be in json format, which is parsed
and returned as a dictionary.
If no build information is found an empty dictionary is returned.
This assumes binutils 2.25 to work.
Args:
exe_path (str): The full path to the executable to be examined
Returns:
dict: A dictionary of the extracted information.
"""
build_info = {}
with mkdtemp() as tempd, pushd(tempd):
proc = subprocess.Popen(
[
OBJCOPY,
DUMP_SECTION,
"{secn}={ofile}".format(secn=elf_section, ofile=BUILDINFO_FILE),
exe_path,
],
stderr=subprocess.PIPE,
)
proc.wait()
errno = proc.returncode
stderr = proc.stderr.read()
if errno or len(stderr): # just return the empty dict
LOGGER.warning('objcopy failed with errno %s.', errno)
if len(stderr):
LOGGER.warning('objcopy failed with following msg:\n%s', stderr)
return build_info
with open(BUILDINFO_FILE) as build_info_f:
try:
build_info = json.load(build_info_f, object_hook=byteify)
except JSONDcdError as jsde:
LOGGER.warning('benchmark executable build is not valid json:')
LOGGER.warning(jsde.msg)
LOGGER.warning('build info section content:')
LOGGER.warning(jsde.doc)
return build_info | python | def extract_build_info(exe_path, elf_section=ELF_SECTION):
"""Extracts the build information from a given executable.
The build information is expected to be in json format, which is parsed
and returned as a dictionary.
If no build information is found an empty dictionary is returned.
This assumes binutils 2.25 to work.
Args:
exe_path (str): The full path to the executable to be examined
Returns:
dict: A dictionary of the extracted information.
"""
build_info = {}
with mkdtemp() as tempd, pushd(tempd):
proc = subprocess.Popen(
[
OBJCOPY,
DUMP_SECTION,
"{secn}={ofile}".format(secn=elf_section, ofile=BUILDINFO_FILE),
exe_path,
],
stderr=subprocess.PIPE,
)
proc.wait()
errno = proc.returncode
stderr = proc.stderr.read()
if errno or len(stderr): # just return the empty dict
LOGGER.warning('objcopy failed with errno %s.', errno)
if len(stderr):
LOGGER.warning('objcopy failed with following msg:\n%s', stderr)
return build_info
with open(BUILDINFO_FILE) as build_info_f:
try:
build_info = json.load(build_info_f, object_hook=byteify)
except JSONDcdError as jsde:
LOGGER.warning('benchmark executable build is not valid json:')
LOGGER.warning(jsde.msg)
LOGGER.warning('build info section content:')
LOGGER.warning(jsde.doc)
return build_info | [
"def",
"extract_build_info",
"(",
"exe_path",
",",
"elf_section",
"=",
"ELF_SECTION",
")",
":",
"build_info",
"=",
"{",
"}",
"with",
"mkdtemp",
"(",
")",
"as",
"tempd",
",",
"pushd",
"(",
"tempd",
")",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
... | Extracts the build information from a given executable.
The build information is expected to be in json format, which is parsed
and returned as a dictionary.
If no build information is found an empty dictionary is returned.
This assumes binutils 2.25 to work.
Args:
exe_path (str): The full path to the executable to be examined
Returns:
dict: A dictionary of the extracted information. | [
"Extracts",
"the",
"build",
"information",
"from",
"a",
"given",
"executable",
"."
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/toolbox/buildinfo.py#L25-L68 | train | 53,614 |
PolyJIT/benchbuild | benchbuild/utils/uchroot.py | no_args | def no_args(**kwargs):
"""Return the uchroot command without any customizations."""
from benchbuild.utils.cmd import uchroot as uchrt
prefixes = CFG["container"]["prefixes"].value
p_paths, p_libs = env(prefixes)
uchrt = run.with_env_recursive(
uchrt,
LD_LIBRARY_PATH=path.list_to_path(p_libs),
PATH=path.list_to_path(p_paths))
return uchrt | python | def no_args(**kwargs):
"""Return the uchroot command without any customizations."""
from benchbuild.utils.cmd import uchroot as uchrt
prefixes = CFG["container"]["prefixes"].value
p_paths, p_libs = env(prefixes)
uchrt = run.with_env_recursive(
uchrt,
LD_LIBRARY_PATH=path.list_to_path(p_libs),
PATH=path.list_to_path(p_paths))
return uchrt | [
"def",
"no_args",
"(",
"*",
"*",
"kwargs",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"cmd",
"import",
"uchroot",
"as",
"uchrt",
"prefixes",
"=",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"prefixes\"",
"]",
".",
"value",
"p_paths",
",",
"p_libs",
... | Return the uchroot command without any customizations. | [
"Return",
"the",
"uchroot",
"command",
"without",
"any",
"customizations",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L50-L61 | train | 53,615 |
PolyJIT/benchbuild | benchbuild/utils/uchroot.py | with_mounts | def with_mounts(*args, uchroot_cmd_fn=no_args, **kwargs):
"""Return a uchroot command with all mounts enabled."""
mounts = CFG["container"]["mounts"].value
prefixes = CFG["container"]["prefixes"].value
uchroot_opts, mounts = __mounts__("mnt", mounts)
uchroot_cmd = uchroot_cmd_fn(**kwargs)
uchroot_cmd = uchroot_cmd[uchroot_opts]
uchroot_cmd = uchroot_cmd[args]
paths, libs = env(mounts)
prefix_paths, prefix_libs = env(prefixes)
uchroot_cmd = run.with_env_recursive(
uchroot_cmd,
LD_LIBRARY_PATH=path.list_to_path(libs + prefix_libs),
PATH=path.list_to_path(paths + prefix_paths))
return uchroot_cmd | python | def with_mounts(*args, uchroot_cmd_fn=no_args, **kwargs):
"""Return a uchroot command with all mounts enabled."""
mounts = CFG["container"]["mounts"].value
prefixes = CFG["container"]["prefixes"].value
uchroot_opts, mounts = __mounts__("mnt", mounts)
uchroot_cmd = uchroot_cmd_fn(**kwargs)
uchroot_cmd = uchroot_cmd[uchroot_opts]
uchroot_cmd = uchroot_cmd[args]
paths, libs = env(mounts)
prefix_paths, prefix_libs = env(prefixes)
uchroot_cmd = run.with_env_recursive(
uchroot_cmd,
LD_LIBRARY_PATH=path.list_to_path(libs + prefix_libs),
PATH=path.list_to_path(paths + prefix_paths))
return uchroot_cmd | [
"def",
"with_mounts",
"(",
"*",
"args",
",",
"uchroot_cmd_fn",
"=",
"no_args",
",",
"*",
"*",
"kwargs",
")",
":",
"mounts",
"=",
"CFG",
"[",
"\"container\"",
"]",
"[",
"\"mounts\"",
"]",
".",
"value",
"prefixes",
"=",
"CFG",
"[",
"\"container\"",
"]",
... | Return a uchroot command with all mounts enabled. | [
"Return",
"a",
"uchroot",
"command",
"with",
"all",
"mounts",
"enabled",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L64-L80 | train | 53,616 |
PolyJIT/benchbuild | benchbuild/utils/uchroot.py | clean_env | def clean_env(uchroot_cmd, varnames):
"""Returns a uchroot cmd that runs inside a filtered environment."""
env = uchroot_cmd["/usr/bin/env"]
__clean_env = env["-u", ",".join(varnames)]
return __clean_env | python | def clean_env(uchroot_cmd, varnames):
"""Returns a uchroot cmd that runs inside a filtered environment."""
env = uchroot_cmd["/usr/bin/env"]
__clean_env = env["-u", ",".join(varnames)]
return __clean_env | [
"def",
"clean_env",
"(",
"uchroot_cmd",
",",
"varnames",
")",
":",
"env",
"=",
"uchroot_cmd",
"[",
"\"/usr/bin/env\"",
"]",
"__clean_env",
"=",
"env",
"[",
"\"-u\"",
",",
"\",\"",
".",
"join",
"(",
"varnames",
")",
"]",
"return",
"__clean_env"
] | Returns a uchroot cmd that runs inside a filtered environment. | [
"Returns",
"a",
"uchroot",
"cmd",
"that",
"runs",
"inside",
"a",
"filtered",
"environment",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L121-L125 | train | 53,617 |
PolyJIT/benchbuild | benchbuild/utils/uchroot.py | mounts | def mounts(prefix, __mounts):
"""
Compute the mountpoints of the current user.
Args:
prefix: Define where the job was running if it ran on a cluster.
mounts: All mounts the user currently uses in his file system.
Return:
mntpoints
"""
i = 0
mntpoints = []
for mount in __mounts:
if not isinstance(mount, dict):
mntpoint = "{0}/{1}".format(prefix, str(i))
mntpoints.append(mntpoint)
i = i + 1
return mntpoints | python | def mounts(prefix, __mounts):
"""
Compute the mountpoints of the current user.
Args:
prefix: Define where the job was running if it ran on a cluster.
mounts: All mounts the user currently uses in his file system.
Return:
mntpoints
"""
i = 0
mntpoints = []
for mount in __mounts:
if not isinstance(mount, dict):
mntpoint = "{0}/{1}".format(prefix, str(i))
mntpoints.append(mntpoint)
i = i + 1
return mntpoints | [
"def",
"mounts",
"(",
"prefix",
",",
"__mounts",
")",
":",
"i",
"=",
"0",
"mntpoints",
"=",
"[",
"]",
"for",
"mount",
"in",
"__mounts",
":",
"if",
"not",
"isinstance",
"(",
"mount",
",",
"dict",
")",
":",
"mntpoint",
"=",
"\"{0}/{1}\"",
".",
"format"... | Compute the mountpoints of the current user.
Args:
prefix: Define where the job was running if it ran on a cluster.
mounts: All mounts the user currently uses in his file system.
Return:
mntpoints | [
"Compute",
"the",
"mountpoints",
"of",
"the",
"current",
"user",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L128-L145 | train | 53,618 |
PolyJIT/benchbuild | benchbuild/utils/uchroot.py | env | def env(mounts):
"""
Compute the environment of the change root for the user.
Args:
mounts: The mountpoints of the current user.
Return:
paths
ld_libs
"""
f_mounts = [m.strip("/") for m in mounts]
root = local.path("/")
ld_libs = [root / m / "lib" for m in f_mounts]
ld_libs.extend([root / m / "lib64" for m in f_mounts])
paths = [root / m / "bin" for m in f_mounts]
paths.extend([root / m / "sbin" for m in f_mounts])
paths.extend([root / m for m in f_mounts])
return paths, ld_libs | python | def env(mounts):
"""
Compute the environment of the change root for the user.
Args:
mounts: The mountpoints of the current user.
Return:
paths
ld_libs
"""
f_mounts = [m.strip("/") for m in mounts]
root = local.path("/")
ld_libs = [root / m / "lib" for m in f_mounts]
ld_libs.extend([root / m / "lib64" for m in f_mounts])
paths = [root / m / "bin" for m in f_mounts]
paths.extend([root / m / "sbin" for m in f_mounts])
paths.extend([root / m for m in f_mounts])
return paths, ld_libs | [
"def",
"env",
"(",
"mounts",
")",
":",
"f_mounts",
"=",
"[",
"m",
".",
"strip",
"(",
"\"/\"",
")",
"for",
"m",
"in",
"mounts",
"]",
"root",
"=",
"local",
".",
"path",
"(",
"\"/\"",
")",
"ld_libs",
"=",
"[",
"root",
"/",
"m",
"/",
"\"lib\"",
"fo... | Compute the environment of the change root for the user.
Args:
mounts: The mountpoints of the current user.
Return:
paths
ld_libs | [
"Compute",
"the",
"environment",
"of",
"the",
"change",
"root",
"for",
"the",
"user",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/uchroot.py#L166-L186 | train | 53,619 |
mromanello/hucitlib | knowledge_base/__init__.py | get_abbreviations | def get_abbreviations(kb):
"""
For the sake of profiling.
"""
return {"%s$$n%i" % (author.get_urn(), i): abbrev
for author in kb.get_authors()
for i, abbrev in enumerate(author.get_abbreviations())
if author.get_urn() is not None} | python | def get_abbreviations(kb):
"""
For the sake of profiling.
"""
return {"%s$$n%i" % (author.get_urn(), i): abbrev
for author in kb.get_authors()
for i, abbrev in enumerate(author.get_abbreviations())
if author.get_urn() is not None} | [
"def",
"get_abbreviations",
"(",
"kb",
")",
":",
"return",
"{",
"\"%s$$n%i\"",
"%",
"(",
"author",
".",
"get_urn",
"(",
")",
",",
"i",
")",
":",
"abbrev",
"for",
"author",
"in",
"kb",
".",
"get_authors",
"(",
")",
"for",
"i",
",",
"abbrev",
"in",
"... | For the sake of profiling. | [
"For",
"the",
"sake",
"of",
"profiling",
"."
] | 6587d1b04eb7e5b48ad7359be845e5d3b444d6fa | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L21-L28 | train | 53,620 |
mromanello/hucitlib | knowledge_base/__init__.py | KnowledgeBase.get_authors | def get_authors(self):
"""
Returns the authors in the Knowledge Base.
:return: a list of `HucitAuthor` instances.
"""
Person = self._session.get_class(surf.ns.EFRBROO['F10_Person'])
return list(Person.all()) | python | def get_authors(self):
"""
Returns the authors in the Knowledge Base.
:return: a list of `HucitAuthor` instances.
"""
Person = self._session.get_class(surf.ns.EFRBROO['F10_Person'])
return list(Person.all()) | [
"def",
"get_authors",
"(",
"self",
")",
":",
"Person",
"=",
"self",
".",
"_session",
".",
"get_class",
"(",
"surf",
".",
"ns",
".",
"EFRBROO",
"[",
"'F10_Person'",
"]",
")",
"return",
"list",
"(",
"Person",
".",
"all",
"(",
")",
")"
] | Returns the authors in the Knowledge Base.
:return: a list of `HucitAuthor` instances. | [
"Returns",
"the",
"authors",
"in",
"the",
"Knowledge",
"Base",
"."
] | 6587d1b04eb7e5b48ad7359be845e5d3b444d6fa | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L253-L261 | train | 53,621 |
mromanello/hucitlib | knowledge_base/__init__.py | KnowledgeBase.get_works | def get_works(self):
"""Return the author's works.
:return: a list of `HucitWork` instances.
"""
Work = self._session.get_class(surf.ns.EFRBROO['F1_Work'])
return list(Work.all()) | python | def get_works(self):
"""Return the author's works.
:return: a list of `HucitWork` instances.
"""
Work = self._session.get_class(surf.ns.EFRBROO['F1_Work'])
return list(Work.all()) | [
"def",
"get_works",
"(",
"self",
")",
":",
"Work",
"=",
"self",
".",
"_session",
".",
"get_class",
"(",
"surf",
".",
"ns",
".",
"EFRBROO",
"[",
"'F1_Work'",
"]",
")",
"return",
"list",
"(",
"Work",
".",
"all",
"(",
")",
")"
] | Return the author's works.
:return: a list of `HucitWork` instances. | [
"Return",
"the",
"author",
"s",
"works",
"."
] | 6587d1b04eb7e5b48ad7359be845e5d3b444d6fa | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L263-L270 | train | 53,622 |
mromanello/hucitlib | knowledge_base/__init__.py | KnowledgeBase.get_author_label | def get_author_label(self, urn):
"""Get the label corresponding to the author identified by the CTS URN.
try to get an lang=en label (if multiple labels in this lang pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found
"""
author = self.get_resource_by_urn(urn)
names = author.get_names()
en_names = sorted([name[1] for name in names if name[0] == "en"], key=len)
try:
assert len(en_names) > 0
return en_names[0]
except Exception as e:
none_names = sorted([name[1] for name in names if name[0] == None], key=len)
try:
return none_names[0]
except Exception as e:
la_names = sorted([name[1] for name in names if name[0] == "la"], key=len)
try:
assert len(la_names) > 0
return la_names[0]
except Exception as e:
return None | python | def get_author_label(self, urn):
"""Get the label corresponding to the author identified by the CTS URN.
try to get an lang=en label (if multiple labels in this lang pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found
"""
author = self.get_resource_by_urn(urn)
names = author.get_names()
en_names = sorted([name[1] for name in names if name[0] == "en"], key=len)
try:
assert len(en_names) > 0
return en_names[0]
except Exception as e:
none_names = sorted([name[1] for name in names if name[0] == None], key=len)
try:
return none_names[0]
except Exception as e:
la_names = sorted([name[1] for name in names if name[0] == "la"], key=len)
try:
assert len(la_names) > 0
return la_names[0]
except Exception as e:
return None | [
"def",
"get_author_label",
"(",
"self",
",",
"urn",
")",
":",
"author",
"=",
"self",
".",
"get_resource_by_urn",
"(",
"urn",
")",
"names",
"=",
"author",
".",
"get_names",
"(",
")",
"en_names",
"=",
"sorted",
"(",
"[",
"name",
"[",
"1",
"]",
"for",
"... | Get the label corresponding to the author identified by the CTS URN.
try to get an lang=en label (if multiple labels in this lang pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found | [
"Get",
"the",
"label",
"corresponding",
"to",
"the",
"author",
"identified",
"by",
"the",
"CTS",
"URN",
"."
] | 6587d1b04eb7e5b48ad7359be845e5d3b444d6fa | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L272-L298 | train | 53,623 |
mromanello/hucitlib | knowledge_base/__init__.py | KnowledgeBase.get_statistics | def get_statistics(self):
"""
Gather basic stats about the Knowledge Base and its contents.
:return: a dictionary
"""
statistics = {
"number_authors": 0,
"number_author_names": 0,
"number_author_abbreviations": 0,
"number_works": 0,
"number_work_titles": 0,
"number_title_abbreviations": 0,
"number_opus_maximum":0,
}
for author in self.get_authors():
if author.get_urn() is not None:
opmax = True if self.get_opus_maximum_of(author.get_urn())\
is not None else False
if opmax:
statistics["number_opus_maximum"] += 1
statistics["number_authors"] += 1
statistics["number_author_names"] += len(author.get_names())
statistics["number_author_abbreviations"] += len(
author.get_abbreviations()
)
for work in author.get_works():
statistics["number_works"] += 1
statistics["number_work_titles"] += len(work.get_titles())
statistics["number_title_abbreviations"] += len(
work.get_abbreviations()
)
return statistics | python | def get_statistics(self):
"""
Gather basic stats about the Knowledge Base and its contents.
:return: a dictionary
"""
statistics = {
"number_authors": 0,
"number_author_names": 0,
"number_author_abbreviations": 0,
"number_works": 0,
"number_work_titles": 0,
"number_title_abbreviations": 0,
"number_opus_maximum":0,
}
for author in self.get_authors():
if author.get_urn() is not None:
opmax = True if self.get_opus_maximum_of(author.get_urn())\
is not None else False
if opmax:
statistics["number_opus_maximum"] += 1
statistics["number_authors"] += 1
statistics["number_author_names"] += len(author.get_names())
statistics["number_author_abbreviations"] += len(
author.get_abbreviations()
)
for work in author.get_works():
statistics["number_works"] += 1
statistics["number_work_titles"] += len(work.get_titles())
statistics["number_title_abbreviations"] += len(
work.get_abbreviations()
)
return statistics | [
"def",
"get_statistics",
"(",
"self",
")",
":",
"statistics",
"=",
"{",
"\"number_authors\"",
":",
"0",
",",
"\"number_author_names\"",
":",
"0",
",",
"\"number_author_abbreviations\"",
":",
"0",
",",
"\"number_works\"",
":",
"0",
",",
"\"number_work_titles\"",
":... | Gather basic stats about the Knowledge Base and its contents.
:return: a dictionary | [
"Gather",
"basic",
"stats",
"about",
"the",
"Knowledge",
"Base",
"and",
"its",
"contents",
"."
] | 6587d1b04eb7e5b48ad7359be845e5d3b444d6fa | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L334-L367 | train | 53,624 |
mromanello/hucitlib | knowledge_base/__init__.py | KnowledgeBase.to_json | def to_json(self):
"""
Serialises the content of the KnowledgeBase as JSON.
:return: TODO
"""
return json.dumps({
"statistics": self.get_statistics()
, "authors": [json.loads(author.to_json()) for author in self.get_authors()]
}, indent=2) | python | def to_json(self):
"""
Serialises the content of the KnowledgeBase as JSON.
:return: TODO
"""
return json.dumps({
"statistics": self.get_statistics()
, "authors": [json.loads(author.to_json()) for author in self.get_authors()]
}, indent=2) | [
"def",
"to_json",
"(",
"self",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"{",
"\"statistics\"",
":",
"self",
".",
"get_statistics",
"(",
")",
",",
"\"authors\"",
":",
"[",
"json",
".",
"loads",
"(",
"author",
".",
"to_json",
"(",
")",
")",
"for"... | Serialises the content of the KnowledgeBase as JSON.
:return: TODO | [
"Serialises",
"the",
"content",
"of",
"the",
"KnowledgeBase",
"as",
"JSON",
"."
] | 6587d1b04eb7e5b48ad7359be845e5d3b444d6fa | https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L398-L407 | train | 53,625 |
BlueBrain/hpcbench | hpcbench/driver/base.py | write_yaml_report | def write_yaml_report(func):
"""Decorator used in campaign node post-processing
"""
@wraps(func)
def _wrapper(*args, **kwargs):
now = datetime.datetime.now()
with Timer() as timer:
data = func(*args, **kwargs)
if isinstance(data, (SEQUENCES, types.GeneratorType)):
report = dict(children=list(map(str, data)))
elif isinstance(data, MAPPINGS):
report = data
else:
raise Exception('Unexpected data type: %s', type(data))
report['elapsed'] = timer.elapsed
report['date'] = now.isoformat()
if "no_exec" not in kwargs and report is not None:
with open(YAML_REPORT_FILE, 'w') as ostr:
yaml.dump(report, ostr, default_flow_style=False)
return report
return _wrapper | python | def write_yaml_report(func):
"""Decorator used in campaign node post-processing
"""
@wraps(func)
def _wrapper(*args, **kwargs):
now = datetime.datetime.now()
with Timer() as timer:
data = func(*args, **kwargs)
if isinstance(data, (SEQUENCES, types.GeneratorType)):
report = dict(children=list(map(str, data)))
elif isinstance(data, MAPPINGS):
report = data
else:
raise Exception('Unexpected data type: %s', type(data))
report['elapsed'] = timer.elapsed
report['date'] = now.isoformat()
if "no_exec" not in kwargs and report is not None:
with open(YAML_REPORT_FILE, 'w') as ostr:
yaml.dump(report, ostr, default_flow_style=False)
return report
return _wrapper | [
"def",
"write_yaml_report",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"with",
"Timer",
"(",
")",
"... | Decorator used in campaign node post-processing | [
"Decorator",
"used",
"in",
"campaign",
"node",
"post",
"-",
"processing"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/base.py#L32-L54 | train | 53,626 |
BlueBrain/hpcbench | hpcbench/driver/base.py | Enumerator.traverse | def traverse(self):
"""Enumerate children and build associated objects
"""
builder = self.child_builder
for child in self._children:
with pushd(str(child)):
yield child, builder(child) | python | def traverse(self):
"""Enumerate children and build associated objects
"""
builder = self.child_builder
for child in self._children:
with pushd(str(child)):
yield child, builder(child) | [
"def",
"traverse",
"(",
"self",
")",
":",
"builder",
"=",
"self",
".",
"child_builder",
"for",
"child",
"in",
"self",
".",
"_children",
":",
"with",
"pushd",
"(",
"str",
"(",
"child",
")",
")",
":",
"yield",
"child",
",",
"builder",
"(",
"child",
")"... | Enumerate children and build associated objects | [
"Enumerate",
"children",
"and",
"build",
"associated",
"objects"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/driver/base.py#L162-L168 | train | 53,627 |
eng-tools/sfsimodels | sfsimodels/models/foundations.py | PadFoundation.pad_position_l | def pad_position_l(self, i):
"""
Determines the position of the ith pad in the length direction.
Assumes equally spaced pads.
:param i: ith number of pad in length direction (0-indexed)
:return:
"""
if i >= self.n_pads_l:
raise ModelError("pad index out-of-bounds")
return (self.length - self.pad_length) / (self.n_pads_l - 1) * i + self.pad_length / 2 | python | def pad_position_l(self, i):
"""
Determines the position of the ith pad in the length direction.
Assumes equally spaced pads.
:param i: ith number of pad in length direction (0-indexed)
:return:
"""
if i >= self.n_pads_l:
raise ModelError("pad index out-of-bounds")
return (self.length - self.pad_length) / (self.n_pads_l - 1) * i + self.pad_length / 2 | [
"def",
"pad_position_l",
"(",
"self",
",",
"i",
")",
":",
"if",
"i",
">=",
"self",
".",
"n_pads_l",
":",
"raise",
"ModelError",
"(",
"\"pad index out-of-bounds\"",
")",
"return",
"(",
"self",
".",
"length",
"-",
"self",
".",
"pad_length",
")",
"/",
"(",
... | Determines the position of the ith pad in the length direction.
Assumes equally spaced pads.
:param i: ith number of pad in length direction (0-indexed)
:return: | [
"Determines",
"the",
"position",
"of",
"the",
"ith",
"pad",
"in",
"the",
"length",
"direction",
".",
"Assumes",
"equally",
"spaced",
"pads",
"."
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L335-L345 | train | 53,628 |
eng-tools/sfsimodels | sfsimodels/models/foundations.py | PadFoundation.pad_position_w | def pad_position_w(self, i):
"""
Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: ith number of pad in width direction (0-indexed)
:return:
"""
if i >= self.n_pads_w:
raise ModelError("pad index out-of-bounds")
return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2 | python | def pad_position_w(self, i):
"""
Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: ith number of pad in width direction (0-indexed)
:return:
"""
if i >= self.n_pads_w:
raise ModelError("pad index out-of-bounds")
return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2 | [
"def",
"pad_position_w",
"(",
"self",
",",
"i",
")",
":",
"if",
"i",
">=",
"self",
".",
"n_pads_w",
":",
"raise",
"ModelError",
"(",
"\"pad index out-of-bounds\"",
")",
"return",
"(",
"self",
".",
"width",
"-",
"self",
".",
"pad_width",
")",
"/",
"(",
... | Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: ith number of pad in width direction (0-indexed)
:return: | [
"Determines",
"the",
"position",
"of",
"the",
"ith",
"pad",
"in",
"the",
"width",
"direction",
".",
"Assumes",
"equally",
"spaced",
"pads",
"."
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L347-L357 | train | 53,629 |
sci-bots/svg-model | svg_model/svgload/svg_parser.py | Svg.add_to_batch | def add_to_batch(self, batch):
'''
Adds paths to the given batch object. They are all added as
GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL
primitive.
'''
for name in self.paths:
svg_path = self.paths[name]
svg_path.add_to_batch(batch) | python | def add_to_batch(self, batch):
'''
Adds paths to the given batch object. They are all added as
GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL
primitive.
'''
for name in self.paths:
svg_path = self.paths[name]
svg_path.add_to_batch(batch) | [
"def",
"add_to_batch",
"(",
"self",
",",
"batch",
")",
":",
"for",
"name",
"in",
"self",
".",
"paths",
":",
"svg_path",
"=",
"self",
".",
"paths",
"[",
"name",
"]",
"svg_path",
".",
"add_to_batch",
"(",
"batch",
")"
] | Adds paths to the given batch object. They are all added as
GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL
primitive. | [
"Adds",
"paths",
"to",
"the",
"given",
"batch",
"object",
".",
"They",
"are",
"all",
"added",
"as",
"GL_TRIANGLES",
"so",
"the",
"batch",
"will",
"aggregate",
"them",
"all",
"into",
"a",
"single",
"OpenGL",
"primitive",
"."
] | 2d119650f995e62b29ce0b3151a23f3b957cb072 | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/svgload/svg_parser.py#L55-L63 | train | 53,630 |
portfoliome/foil | foil/util.py | alphanum_key | def alphanum_key(s):
"""Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [int(c) if c.isdigit() else c for c in _RE_INT.split(s)] | python | def alphanum_key(s):
"""Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [int(c) if c.isdigit() else c for c in _RE_INT.split(s)] | [
"def",
"alphanum_key",
"(",
"s",
")",
":",
"return",
"[",
"int",
"(",
"c",
")",
"if",
"c",
".",
"isdigit",
"(",
")",
"else",
"c",
"for",
"c",
"in",
"_RE_INT",
".",
"split",
"(",
"s",
")",
"]"
] | Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"] | [
"Turn",
"a",
"string",
"into",
"a",
"list",
"of",
"string",
"and",
"number",
"chunks",
"."
] | b66d8cf4ab048a387d8c7a033b47e922ed6917d6 | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/util.py#L6-L12 | train | 53,631 |
PolyJIT/benchbuild | benchbuild/experiments/__init__.py | discover | def discover():
"""
Import all experiments listed in PLUGINS_EXPERIMENTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.experiments import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> lg.getLogger('benchbuild').setLevel(lg.DEBUG)
>>> lg.getLogger('benchbuild').handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["experiments"] = ["benchbuild.non.existing", "benchbuild.experiments.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
ImportError: No module named 'benchbuild.non'
"""
if CFG["plugins"]["autoload"]:
experiment_plugins = CFG["plugins"]["experiments"].value
for exp_plugin in experiment_plugins:
try:
importlib.import_module(exp_plugin)
except ImportError as import_error:
LOG.error("Could not find '%s'", exp_plugin)
LOG.error("ImportError: %s", import_error.msg) | python | def discover():
"""
Import all experiments listed in PLUGINS_EXPERIMENTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.experiments import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> lg.getLogger('benchbuild').setLevel(lg.DEBUG)
>>> lg.getLogger('benchbuild').handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["experiments"] = ["benchbuild.non.existing", "benchbuild.experiments.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
ImportError: No module named 'benchbuild.non'
"""
if CFG["plugins"]["autoload"]:
experiment_plugins = CFG["plugins"]["experiments"].value
for exp_plugin in experiment_plugins:
try:
importlib.import_module(exp_plugin)
except ImportError as import_error:
LOG.error("Could not find '%s'", exp_plugin)
LOG.error("ImportError: %s", import_error.msg) | [
"def",
"discover",
"(",
")",
":",
"if",
"CFG",
"[",
"\"plugins\"",
"]",
"[",
"\"autoload\"",
"]",
":",
"experiment_plugins",
"=",
"CFG",
"[",
"\"plugins\"",
"]",
"[",
"\"experiments\"",
"]",
".",
"value",
"for",
"exp_plugin",
"in",
"experiment_plugins",
":",... | Import all experiments listed in PLUGINS_EXPERIMENTS.
Tests:
>>> from benchbuild.settings import CFG
>>> from benchbuild.experiments import discover
>>> import logging as lg
>>> import sys
>>> l = lg.getLogger('benchbuild')
>>> lg.getLogger('benchbuild').setLevel(lg.DEBUG)
>>> lg.getLogger('benchbuild').handlers = [lg.StreamHandler(stream=sys.stdout)]
>>> CFG["plugins"]["experiments"] = ["benchbuild.non.existing", "benchbuild.experiments.raw"]
>>> discover()
Could not find 'benchbuild.non.existing'
ImportError: No module named 'benchbuild.non' | [
"Import",
"all",
"experiments",
"listed",
"in",
"PLUGINS_EXPERIMENTS",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/experiments/__init__.py#L20-L44 | train | 53,632 |
PolyJIT/benchbuild | benchbuild/cli/project.py | print_projects | def print_projects(projects=None):
"""
Print a list of projects registered for that experiment.
Args:
exp: The experiment to print all projects for.
"""
grouped_by = {}
if not projects:
print(
"Your selection didn't include any projects for this experiment.")
return
for name in projects:
prj = projects[name]
if prj.GROUP not in grouped_by:
grouped_by[prj.GROUP] = []
grouped_by[prj.GROUP].append("{name}/{group}".format(
name=prj.NAME, group=prj.GROUP))
for name in grouped_by:
print("group: {0}".format(name))
group_projects = sorted(grouped_by[name])
for prj in group_projects:
prj_cls = projects[prj]
version_str = None
if hasattr(prj_cls, 'versions'):
version_str = ", ".join(prj_cls.versions())
project_id = "{0}/{1}".format(prj_cls.NAME, prj_cls.GROUP)
project_str = \
" name: {id:<32} version: {version:<24} source: {src}".format(
id=str(project_id),
version=str(prj_cls.VERSION),
src=str(prj_cls.SRC_FILE))
print(project_str)
if prj_cls.__doc__:
docstr = prj_cls.__doc__.strip("\n ")
print(" description: {desc}".format(desc=docstr))
if version_str:
print(" versions: {versions}".format(versions=version_str))
print() | python | def print_projects(projects=None):
"""
Print a list of projects registered for that experiment.
Args:
exp: The experiment to print all projects for.
"""
grouped_by = {}
if not projects:
print(
"Your selection didn't include any projects for this experiment.")
return
for name in projects:
prj = projects[name]
if prj.GROUP not in grouped_by:
grouped_by[prj.GROUP] = []
grouped_by[prj.GROUP].append("{name}/{group}".format(
name=prj.NAME, group=prj.GROUP))
for name in grouped_by:
print("group: {0}".format(name))
group_projects = sorted(grouped_by[name])
for prj in group_projects:
prj_cls = projects[prj]
version_str = None
if hasattr(prj_cls, 'versions'):
version_str = ", ".join(prj_cls.versions())
project_id = "{0}/{1}".format(prj_cls.NAME, prj_cls.GROUP)
project_str = \
" name: {id:<32} version: {version:<24} source: {src}".format(
id=str(project_id),
version=str(prj_cls.VERSION),
src=str(prj_cls.SRC_FILE))
print(project_str)
if prj_cls.__doc__:
docstr = prj_cls.__doc__.strip("\n ")
print(" description: {desc}".format(desc=docstr))
if version_str:
print(" versions: {versions}".format(versions=version_str))
print() | [
"def",
"print_projects",
"(",
"projects",
"=",
"None",
")",
":",
"grouped_by",
"=",
"{",
"}",
"if",
"not",
"projects",
":",
"print",
"(",
"\"Your selection didn't include any projects for this experiment.\"",
")",
"return",
"for",
"name",
"in",
"projects",
":",
"p... | Print a list of projects registered for that experiment.
Args:
exp: The experiment to print all projects for. | [
"Print",
"a",
"list",
"of",
"projects",
"registered",
"for",
"that",
"experiment",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/cli/project.py#L35-L81 | train | 53,633 |
Capitains/Nautilus | capitains_nautilus/cmd.py | _commandline | def _commandline(repositories,
port=8000, host="127.0.0.1", debug=False,
cache=None, cache_path="./cache", redis=None):
""" Run a CTS API from command line.
.. warning:: This function should not be used in the production context
:param repositories:
:param port:
:param ip:
:param debug:
:param cache:
:param cache_path:
:return:
"""
if cache == "redis":
nautilus_cache = RedisCache(redis)
cache_type = "redis"
elif cache == "filesystem":
nautilus_cache = FileSystemCache(cache_path)
cache_type = "simple"
else:
nautilus_cache = NullCache()
cache_type = "simple"
app = Flask("Nautilus")
if debug:
app.logger.setLevel(logging.INFO)
resolver = NautilusCtsResolver(resource=repositories)
nautilus = FlaskNautilus(
app=app,
resolver=resolver
#parser_cache=WerkzeugCacheWrapper(nautilus_cache),
#logger=None
)
nautilus.resolver.parse()
app.run(debug=debug, port=port, host=host) | python | def _commandline(repositories,
port=8000, host="127.0.0.1", debug=False,
cache=None, cache_path="./cache", redis=None):
""" Run a CTS API from command line.
.. warning:: This function should not be used in the production context
:param repositories:
:param port:
:param ip:
:param debug:
:param cache:
:param cache_path:
:return:
"""
if cache == "redis":
nautilus_cache = RedisCache(redis)
cache_type = "redis"
elif cache == "filesystem":
nautilus_cache = FileSystemCache(cache_path)
cache_type = "simple"
else:
nautilus_cache = NullCache()
cache_type = "simple"
app = Flask("Nautilus")
if debug:
app.logger.setLevel(logging.INFO)
resolver = NautilusCtsResolver(resource=repositories)
nautilus = FlaskNautilus(
app=app,
resolver=resolver
#parser_cache=WerkzeugCacheWrapper(nautilus_cache),
#logger=None
)
nautilus.resolver.parse()
app.run(debug=debug, port=port, host=host) | [
"def",
"_commandline",
"(",
"repositories",
",",
"port",
"=",
"8000",
",",
"host",
"=",
"\"127.0.0.1\"",
",",
"debug",
"=",
"False",
",",
"cache",
"=",
"None",
",",
"cache_path",
"=",
"\"./cache\"",
",",
"redis",
"=",
"None",
")",
":",
"if",
"cache",
"... | Run a CTS API from command line.
.. warning:: This function should not be used in the production context
:param repositories:
:param port:
:param ip:
:param debug:
:param cache:
:param cache_path:
:return: | [
"Run",
"a",
"CTS",
"API",
"from",
"command",
"line",
"."
] | 6be453fe0cc0e2c1b89ff06e5af1409165fc1411 | https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cmd.py#L11-L49 | train | 53,634 |
BlueBrain/hpcbench | hpcbench/report.py | render | def render(template=None, ostr=None, **kwargs):
"""Generate report from a campaign
:param template: Jinja template to use, ``DEFAULT_TEMPLATE`` is used
if not specified
:param ostr: output file or filename. Default is standard output
"""
jinja_environment.filters['texscape'] = tex_escape
template = template or DEFAULT_TEMPLATE
ostr = ostr or sys.stdout
jinja_template = jinja_environment.get_template(template)
jinja_template.stream(**kwargs).dump(ostr) | python | def render(template=None, ostr=None, **kwargs):
"""Generate report from a campaign
:param template: Jinja template to use, ``DEFAULT_TEMPLATE`` is used
if not specified
:param ostr: output file or filename. Default is standard output
"""
jinja_environment.filters['texscape'] = tex_escape
template = template or DEFAULT_TEMPLATE
ostr = ostr or sys.stdout
jinja_template = jinja_environment.get_template(template)
jinja_template.stream(**kwargs).dump(ostr) | [
"def",
"render",
"(",
"template",
"=",
"None",
",",
"ostr",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"jinja_environment",
".",
"filters",
"[",
"'texscape'",
"]",
"=",
"tex_escape",
"template",
"=",
"template",
"or",
"DEFAULT_TEMPLATE",
"ostr",
"=",
... | Generate report from a campaign
:param template: Jinja template to use, ``DEFAULT_TEMPLATE`` is used
if not specified
:param ostr: output file or filename. Default is standard output | [
"Generate",
"report",
"from",
"a",
"campaign"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/report.py#L41-L52 | train | 53,635 |
eng-tools/sfsimodels | sfsimodels/files.py | load_json | def load_json(ffp, custom=None, verbose=0):
"""
Given a json file it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param verbose: int, console output
:return: dict
"""
data = json.load(open(ffp))
return ecp_dict_to_objects(data, custom, verbose=verbose) | python | def load_json(ffp, custom=None, verbose=0):
"""
Given a json file it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param verbose: int, console output
:return: dict
"""
data = json.load(open(ffp))
return ecp_dict_to_objects(data, custom, verbose=verbose) | [
"def",
"load_json",
"(",
"ffp",
",",
"custom",
"=",
"None",
",",
"verbose",
"=",
"0",
")",
":",
"data",
"=",
"json",
".",
"load",
"(",
"open",
"(",
"ffp",
")",
")",
"return",
"ecp_dict_to_objects",
"(",
"data",
",",
"custom",
",",
"verbose",
"=",
"... | Given a json file it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param verbose: int, console output
:return: dict | [
"Given",
"a",
"json",
"file",
"it",
"creates",
"a",
"dictionary",
"of",
"sfsi",
"objects"
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L11-L21 | train | 53,636 |
eng-tools/sfsimodels | sfsimodels/files.py | loads_json | def loads_json(p_str, custom=None, meta=False, verbose=0):
"""
Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict
"""
data = json.loads(p_str)
if meta:
md = {}
for item in data:
if item != "models":
md[item] = data[item]
return ecp_dict_to_objects(data, custom, verbose=verbose), md
else:
return ecp_dict_to_objects(data, custom, verbose=verbose) | python | def loads_json(p_str, custom=None, meta=False, verbose=0):
"""
Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict
"""
data = json.loads(p_str)
if meta:
md = {}
for item in data:
if item != "models":
md[item] = data[item]
return ecp_dict_to_objects(data, custom, verbose=verbose), md
else:
return ecp_dict_to_objects(data, custom, verbose=verbose) | [
"def",
"loads_json",
"(",
"p_str",
",",
"custom",
"=",
"None",
",",
"meta",
"=",
"False",
",",
"verbose",
"=",
"0",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"p_str",
")",
"if",
"meta",
":",
"md",
"=",
"{",
"}",
"for",
"item",
"in",
"da... | Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict | [
"Given",
"a",
"json",
"string",
"it",
"creates",
"a",
"dictionary",
"of",
"sfsi",
"objects"
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L33-L51 | train | 53,637 |
eng-tools/sfsimodels | sfsimodels/files.py | migrate_ecp | def migrate_ecp(in_ffp, out_ffp):
"""Migrates and ECP file to the current version of sfsimodels"""
objs, meta_data = load_json_and_meta(in_ffp)
ecp_output = Output()
for m_type in objs:
for instance in objs[m_type]:
ecp_output.add_to_dict(objs[m_type][instance])
ecp_output.name = meta_data["name"]
ecp_output.units = meta_data["units"]
ecp_output.comments = meta_data["comments"]
ecp_output.sfsimodels_version = meta_data["sfsimodels_version"]
p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], indent=4)
a = open(out_ffp, "w")
a.write(p_str)
a.close() | python | def migrate_ecp(in_ffp, out_ffp):
"""Migrates and ECP file to the current version of sfsimodels"""
objs, meta_data = load_json_and_meta(in_ffp)
ecp_output = Output()
for m_type in objs:
for instance in objs[m_type]:
ecp_output.add_to_dict(objs[m_type][instance])
ecp_output.name = meta_data["name"]
ecp_output.units = meta_data["units"]
ecp_output.comments = meta_data["comments"]
ecp_output.sfsimodels_version = meta_data["sfsimodels_version"]
p_str = json.dumps(ecp_output.to_dict(), skipkeys=["__repr__"], indent=4)
a = open(out_ffp, "w")
a.write(p_str)
a.close() | [
"def",
"migrate_ecp",
"(",
"in_ffp",
",",
"out_ffp",
")",
":",
"objs",
",",
"meta_data",
"=",
"load_json_and_meta",
"(",
"in_ffp",
")",
"ecp_output",
"=",
"Output",
"(",
")",
"for",
"m_type",
"in",
"objs",
":",
"for",
"instance",
"in",
"objs",
"[",
"m_ty... | Migrates and ECP file to the current version of sfsimodels | [
"Migrates",
"and",
"ECP",
"file",
"to",
"the",
"current",
"version",
"of",
"sfsimodels"
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L287-L301 | train | 53,638 |
eng-tools/sfsimodels | sfsimodels/files.py | Output.add_to_dict | def add_to_dict(self, an_object, extras=None):
"""
Convert models to json serialisable output
:param an_object: An instance of a model object
:param extras: A dictionary of extra variables that should be
:return:
"""
if an_object.id is None:
raise ModelError("id must be set on object before adding to output.")
if hasattr(an_object, "base_type"):
mtype = an_object.base_type
elif hasattr(an_object, "type"):
if an_object.type in standard_types:
mtype = an_object.type
else:
mtype = "custom_type"
else:
raise ModelError("Object does not have attribute 'base_type' or 'type', cannot add to output.")
if mtype not in self.unordered_models: # Catch any custom objects
self.unordered_models[mtype] = OrderedDict()
if hasattr(an_object, "add_to_dict"):
an_object.add_to_dict(self.unordered_models)
elif hasattr(an_object, "to_dict"):
self.unordered_models[mtype][an_object.unique_hash] = an_object.to_dict(compression=self.compression)
else:
raise ModelError("Object does not have method 'to_dict', cannot add to output.") | python | def add_to_dict(self, an_object, extras=None):
"""
Convert models to json serialisable output
:param an_object: An instance of a model object
:param extras: A dictionary of extra variables that should be
:return:
"""
if an_object.id is None:
raise ModelError("id must be set on object before adding to output.")
if hasattr(an_object, "base_type"):
mtype = an_object.base_type
elif hasattr(an_object, "type"):
if an_object.type in standard_types:
mtype = an_object.type
else:
mtype = "custom_type"
else:
raise ModelError("Object does not have attribute 'base_type' or 'type', cannot add to output.")
if mtype not in self.unordered_models: # Catch any custom objects
self.unordered_models[mtype] = OrderedDict()
if hasattr(an_object, "add_to_dict"):
an_object.add_to_dict(self.unordered_models)
elif hasattr(an_object, "to_dict"):
self.unordered_models[mtype][an_object.unique_hash] = an_object.to_dict(compression=self.compression)
else:
raise ModelError("Object does not have method 'to_dict', cannot add to output.") | [
"def",
"add_to_dict",
"(",
"self",
",",
"an_object",
",",
"extras",
"=",
"None",
")",
":",
"if",
"an_object",
".",
"id",
"is",
"None",
":",
"raise",
"ModelError",
"(",
"\"id must be set on object before adding to output.\"",
")",
"if",
"hasattr",
"(",
"an_object... | Convert models to json serialisable output
:param an_object: An instance of a model object
:param extras: A dictionary of extra variables that should be
:return: | [
"Convert",
"models",
"to",
"json",
"serialisable",
"output"
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L216-L244 | train | 53,639 |
eng-tools/sfsimodels | sfsimodels/files.py | Output.add_to_output | def add_to_output(self, mtype, m_id, serialisable_dict):
"""
Can add additional objects or dictionaries to output file that don't conform to standard objects.
:param mtype:
:param m_id:
:param serialisable_dict:
:return:
"""
if mtype not in self.unordered_models:
self.unordered_models[mtype] = OrderedDict()
self.unordered_models[mtype][m_id] = serialisable_dict | python | def add_to_output(self, mtype, m_id, serialisable_dict):
"""
Can add additional objects or dictionaries to output file that don't conform to standard objects.
:param mtype:
:param m_id:
:param serialisable_dict:
:return:
"""
if mtype not in self.unordered_models:
self.unordered_models[mtype] = OrderedDict()
self.unordered_models[mtype][m_id] = serialisable_dict | [
"def",
"add_to_output",
"(",
"self",
",",
"mtype",
",",
"m_id",
",",
"serialisable_dict",
")",
":",
"if",
"mtype",
"not",
"in",
"self",
".",
"unordered_models",
":",
"self",
".",
"unordered_models",
"[",
"mtype",
"]",
"=",
"OrderedDict",
"(",
")",
"self",
... | Can add additional objects or dictionaries to output file that don't conform to standard objects.
:param mtype:
:param m_id:
:param serialisable_dict:
:return: | [
"Can",
"add",
"additional",
"objects",
"or",
"dictionaries",
"to",
"output",
"file",
"that",
"don",
"t",
"conform",
"to",
"standard",
"objects",
"."
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L246-L257 | train | 53,640 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | available_cpu_count | def available_cpu_count() -> int:
"""
Get the number of available CPUs.
Number of available virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling
userspace-only program.
Returns:
Number of avaialable CPUs.
"""
# cpuset
# cpuset may restrict the number of *available* processors
try:
match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$',
open('/proc/self/status').read())
if match:
res = bin(int(match.group(1).replace(',', ''), 16)).count('1')
if res > 0:
return res
except IOError:
LOG.debug("Could not get the number of allowed CPUs")
# http://code.google.com/p/psutil/
try:
import psutil
return psutil.cpu_count() # psutil.NUM_CPUS on old versions
except (ImportError, AttributeError):
LOG.debug("Could not get the number of allowed CPUs")
# POSIX
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res
except (AttributeError, ValueError):
LOG.debug("Could not get the number of allowed CPUs")
# Linux
try:
res = open('/proc/cpuinfo').read().count('processor\t:')
if res > 0:
return res
except IOError:
LOG.debug("Could not get the number of allowed CPUs")
raise Exception('Can not determine number of CPUs on this system') | python | def available_cpu_count() -> int:
"""
Get the number of available CPUs.
Number of available virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling
userspace-only program.
Returns:
Number of avaialable CPUs.
"""
# cpuset
# cpuset may restrict the number of *available* processors
try:
match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$',
open('/proc/self/status').read())
if match:
res = bin(int(match.group(1).replace(',', ''), 16)).count('1')
if res > 0:
return res
except IOError:
LOG.debug("Could not get the number of allowed CPUs")
# http://code.google.com/p/psutil/
try:
import psutil
return psutil.cpu_count() # psutil.NUM_CPUS on old versions
except (ImportError, AttributeError):
LOG.debug("Could not get the number of allowed CPUs")
# POSIX
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res
except (AttributeError, ValueError):
LOG.debug("Could not get the number of allowed CPUs")
# Linux
try:
res = open('/proc/cpuinfo').read().count('processor\t:')
if res > 0:
return res
except IOError:
LOG.debug("Could not get the number of allowed CPUs")
raise Exception('Can not determine number of CPUs on this system') | [
"def",
"available_cpu_count",
"(",
")",
"->",
"int",
":",
"# cpuset",
"# cpuset may restrict the number of *available* processors",
"try",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'(?m)^Cpus_allowed:\\s*(.*)$'",
",",
"open",
"(",
"'/proc/self/status'",
")",
".",
... | Get the number of available CPUs.
Number of available virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling
userspace-only program.
Returns:
Number of avaialable CPUs. | [
"Get",
"the",
"number",
"of",
"available",
"CPUs",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L36-L85 | train | 53,641 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | escape_yaml | def escape_yaml(raw_str: str) -> str:
"""
Shell-Escape a yaml input string.
Args:
raw_str: The unescaped string.
"""
escape_list = [char for char in raw_str if char in ['!', '{', '[']]
if len(escape_list) == 0:
return raw_str
str_quotes = '"'
i_str_quotes = "'"
if str_quotes in raw_str and str_quotes not in raw_str[1:-1]:
return raw_str
if str_quotes in raw_str[1:-1]:
raw_str = i_str_quotes + raw_str + i_str_quotes
else:
raw_str = str_quotes + raw_str + str_quotes
return raw_str | python | def escape_yaml(raw_str: str) -> str:
"""
Shell-Escape a yaml input string.
Args:
raw_str: The unescaped string.
"""
escape_list = [char for char in raw_str if char in ['!', '{', '[']]
if len(escape_list) == 0:
return raw_str
str_quotes = '"'
i_str_quotes = "'"
if str_quotes in raw_str and str_quotes not in raw_str[1:-1]:
return raw_str
if str_quotes in raw_str[1:-1]:
raw_str = i_str_quotes + raw_str + i_str_quotes
else:
raw_str = str_quotes + raw_str + str_quotes
return raw_str | [
"def",
"escape_yaml",
"(",
"raw_str",
":",
"str",
")",
"->",
"str",
":",
"escape_list",
"=",
"[",
"char",
"for",
"char",
"in",
"raw_str",
"if",
"char",
"in",
"[",
"'!'",
",",
"'{'",
",",
"'['",
"]",
"]",
"if",
"len",
"(",
"escape_list",
")",
"==",
... | Shell-Escape a yaml input string.
Args:
raw_str: The unescaped string. | [
"Shell",
"-",
"Escape",
"a",
"yaml",
"input",
"string",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L92-L112 | train | 53,642 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | to_yaml | def to_yaml(value) -> str:
"""Convert a given value to a YAML string."""
stream = yaml.io.StringIO()
dumper = ConfigDumper(stream, default_flow_style=True, width=sys.maxsize)
val = None
try:
dumper.open()
dumper.represent(value)
val = stream.getvalue().strip()
dumper.close()
finally:
dumper.dispose()
return val | python | def to_yaml(value) -> str:
"""Convert a given value to a YAML string."""
stream = yaml.io.StringIO()
dumper = ConfigDumper(stream, default_flow_style=True, width=sys.maxsize)
val = None
try:
dumper.open()
dumper.represent(value)
val = stream.getvalue().strip()
dumper.close()
finally:
dumper.dispose()
return val | [
"def",
"to_yaml",
"(",
"value",
")",
"->",
"str",
":",
"stream",
"=",
"yaml",
".",
"io",
".",
"StringIO",
"(",
")",
"dumper",
"=",
"ConfigDumper",
"(",
"stream",
",",
"default_flow_style",
"=",
"True",
",",
"width",
"=",
"sys",
".",
"maxsize",
")",
"... | Convert a given value to a YAML string. | [
"Convert",
"a",
"given",
"value",
"to",
"a",
"YAML",
"string",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L130-L143 | train | 53,643 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | to_env_var | def to_env_var(env_var: str, value) -> str:
"""
Create an environment variable from a name and a value.
This generates a shell-compatible representation of an
environment variable that is assigned a YAML representation of
a value.
Args:
env_var (str): Name of the environment variable.
value (Any): A value we convert from.
"""
val = to_yaml(value)
ret_val = "%s=%s" % (env_var, escape_yaml(val))
return ret_val | python | def to_env_var(env_var: str, value) -> str:
"""
Create an environment variable from a name and a value.
This generates a shell-compatible representation of an
environment variable that is assigned a YAML representation of
a value.
Args:
env_var (str): Name of the environment variable.
value (Any): A value we convert from.
"""
val = to_yaml(value)
ret_val = "%s=%s" % (env_var, escape_yaml(val))
return ret_val | [
"def",
"to_env_var",
"(",
"env_var",
":",
"str",
",",
"value",
")",
"->",
"str",
":",
"val",
"=",
"to_yaml",
"(",
"value",
")",
"ret_val",
"=",
"\"%s=%s\"",
"%",
"(",
"env_var",
",",
"escape_yaml",
"(",
"val",
")",
")",
"return",
"ret_val"
] | Create an environment variable from a name and a value.
This generates a shell-compatible representation of an
environment variable that is assigned a YAML representation of
a value.
Args:
env_var (str): Name of the environment variable.
value (Any): A value we convert from. | [
"Create",
"an",
"environment",
"variable",
"from",
"a",
"name",
"and",
"a",
"value",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L146-L160 | train | 53,644 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | find_config | def find_config(test_file=None, defaults=None, root=os.curdir):
"""
Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
default: The name of the config file we look for.
root: The directory to start looking for.
Returns:
Path to the default config file, None if we can't find anything.
"""
if defaults is None:
defaults = [".benchbuild.yml", ".benchbuild.yaml"]
def walk_rec(cur_path, root):
cur_path = local.path(root) / test_file
if cur_path.exists():
return cur_path
new_root = local.path(root) / os.pardir
return walk_rec(cur_path, new_root) if new_root != root else None
if test_file is not None:
return walk_rec(test_file, root)
for test_file in defaults:
ret = walk_rec(test_file, root)
if ret is not None:
return ret | python | def find_config(test_file=None, defaults=None, root=os.curdir):
"""
Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
default: The name of the config file we look for.
root: The directory to start looking for.
Returns:
Path to the default config file, None if we can't find anything.
"""
if defaults is None:
defaults = [".benchbuild.yml", ".benchbuild.yaml"]
def walk_rec(cur_path, root):
cur_path = local.path(root) / test_file
if cur_path.exists():
return cur_path
new_root = local.path(root) / os.pardir
return walk_rec(cur_path, new_root) if new_root != root else None
if test_file is not None:
return walk_rec(test_file, root)
for test_file in defaults:
ret = walk_rec(test_file, root)
if ret is not None:
return ret | [
"def",
"find_config",
"(",
"test_file",
"=",
"None",
",",
"defaults",
"=",
"None",
",",
"root",
"=",
"os",
".",
"curdir",
")",
":",
"if",
"defaults",
"is",
"None",
":",
"defaults",
"=",
"[",
"\".benchbuild.yml\"",
",",
"\".benchbuild.yaml\"",
"]",
"def",
... | Find the path to the default config file.
We look at :root: for the :default: config file. If we can't find it
there we start looking at the parent directory recursively until we
find a file named :default: and return the absolute path to it.
If we can't find anything, we return None.
Args:
default: The name of the config file we look for.
root: The directory to start looking for.
Returns:
Path to the default config file, None if we can't find anything. | [
"Find",
"the",
"path",
"to",
"the",
"default",
"config",
"file",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L552-L585 | train | 53,645 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | setup_config | def setup_config(cfg, config_filenames=None, env_var_name=None):
"""
This will initialize the given configuration object.
The following resources are available in the same order:
1) Default settings.
2) Config file.
3) Environment variables.
WARNING: Environment variables do _not_ take precedence over the config
file right now. (init_from_env will refuse to update the
value, if there is already one.)
Args:
config_filenames: list of possible config filenames
env_var_name: name of the environment variable holding the config path
"""
if env_var_name is None:
env_var_name = "BB_CONFIG_FILE"
config_path = os.getenv(env_var_name, None)
if not config_path:
config_path = find_config(defaults=config_filenames)
if config_path:
cfg.load(config_path)
cfg["config_file"] = os.path.abspath(config_path)
cfg.init_from_env() | python | def setup_config(cfg, config_filenames=None, env_var_name=None):
"""
This will initialize the given configuration object.
The following resources are available in the same order:
1) Default settings.
2) Config file.
3) Environment variables.
WARNING: Environment variables do _not_ take precedence over the config
file right now. (init_from_env will refuse to update the
value, if there is already one.)
Args:
config_filenames: list of possible config filenames
env_var_name: name of the environment variable holding the config path
"""
if env_var_name is None:
env_var_name = "BB_CONFIG_FILE"
config_path = os.getenv(env_var_name, None)
if not config_path:
config_path = find_config(defaults=config_filenames)
if config_path:
cfg.load(config_path)
cfg["config_file"] = os.path.abspath(config_path)
cfg.init_from_env() | [
"def",
"setup_config",
"(",
"cfg",
",",
"config_filenames",
"=",
"None",
",",
"env_var_name",
"=",
"None",
")",
":",
"if",
"env_var_name",
"is",
"None",
":",
"env_var_name",
"=",
"\"BB_CONFIG_FILE\"",
"config_path",
"=",
"os",
".",
"getenv",
"(",
"env_var_name... | This will initialize the given configuration object.
The following resources are available in the same order:
1) Default settings.
2) Config file.
3) Environment variables.
WARNING: Environment variables do _not_ take precedence over the config
file right now. (init_from_env will refuse to update the
value, if there is already one.)
Args:
config_filenames: list of possible config filenames
env_var_name: name of the environment variable holding the config path | [
"This",
"will",
"initialize",
"the",
"given",
"configuration",
"object",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L588-L615 | train | 53,646 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | upgrade | def upgrade(cfg):
"""Provide forward migration for configuration files."""
db_node = cfg["db"]
old_db_elems = ["host", "name", "port", "pass", "user", "dialect"]
has_old_db_elems = [x in db_node for x in old_db_elems]
if any(has_old_db_elems):
print("Old database configuration found. "
"Converting to new connect_string. "
"This will *not* be stored in the configuration automatically.")
cfg["db"]["connect_string"] = \
"{dialect}://{user}:{password}@{host}:{port}/{name}".format(
dialect=cfg["db"]["dialect"]["value"],
user=cfg["db"]["user"]["value"],
password=cfg["db"]["pass"]["value"],
host=cfg["db"]["host"]["value"],
port=cfg["db"]["port"]["value"],
name=cfg["db"]["name"]["value"]) | python | def upgrade(cfg):
"""Provide forward migration for configuration files."""
db_node = cfg["db"]
old_db_elems = ["host", "name", "port", "pass", "user", "dialect"]
has_old_db_elems = [x in db_node for x in old_db_elems]
if any(has_old_db_elems):
print("Old database configuration found. "
"Converting to new connect_string. "
"This will *not* be stored in the configuration automatically.")
cfg["db"]["connect_string"] = \
"{dialect}://{user}:{password}@{host}:{port}/{name}".format(
dialect=cfg["db"]["dialect"]["value"],
user=cfg["db"]["user"]["value"],
password=cfg["db"]["pass"]["value"],
host=cfg["db"]["host"]["value"],
port=cfg["db"]["port"]["value"],
name=cfg["db"]["name"]["value"]) | [
"def",
"upgrade",
"(",
"cfg",
")",
":",
"db_node",
"=",
"cfg",
"[",
"\"db\"",
"]",
"old_db_elems",
"=",
"[",
"\"host\"",
",",
"\"name\"",
",",
"\"port\"",
",",
"\"pass\"",
",",
"\"user\"",
",",
"\"dialect\"",
"]",
"has_old_db_elems",
"=",
"[",
"x",
"in",... | Provide forward migration for configuration files. | [
"Provide",
"forward",
"migration",
"for",
"configuration",
"files",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L640-L657 | train | 53,647 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | uuid_constructor | def uuid_constructor(loader, node):
""""
Construct a uuid.UUID object form a scalar YAML node.
Tests:
>>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader)
>>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}")
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
"""
value = loader.construct_scalar(node)
return uuid.UUID(value) | python | def uuid_constructor(loader, node):
""""
Construct a uuid.UUID object form a scalar YAML node.
Tests:
>>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader)
>>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}")
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
"""
value = loader.construct_scalar(node)
return uuid.UUID(value) | [
"def",
"uuid_constructor",
"(",
"loader",
",",
"node",
")",
":",
"value",
"=",
"loader",
".",
"construct_scalar",
"(",
"node",
")",
"return",
"uuid",
".",
"UUID",
"(",
"value",
")"
] | Construct a uuid.UUID object form a scalar YAML node.
Tests:
>>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader)
>>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}")
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')} | [
"Construct",
"a",
"uuid",
".",
"UUID",
"object",
"form",
"a",
"scalar",
"YAML",
"node",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L672-L683 | train | 53,648 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | uuid_add_implicit_resolver | def uuid_add_implicit_resolver(Loader=ConfigLoader, Dumper=ConfigDumper):
"""
Attach an implicit pattern resolver for UUID objects.
Tests:
>>> class TestDumper(yaml.SafeDumper): pass
>>> class TestLoader(yaml.SafeLoader): pass
>>> TUUID = 'cc3702ca-699a-4aa6-8226-4c938f294d9b'
>>> IN = {'test': uuid.UUID(TUUID)}
>>> OUT = '{test: cc3702ca-699a-4aa6-8226-4c938f294d9b}'
>>> yaml.add_representer(uuid.UUID, uuid_representer, Dumper=TestDumper)
>>> yaml.add_constructor('!uuid', uuid_constructor, Loader=TestLoader)
>>> uuid_add_implicit_resolver(Loader=TestLoader, Dumper=TestDumper)
>>> yaml.dump(IN, Dumper=TestDumper)
'test: cc3702ca-699a-4aa6-8226-4c938f294d9b\\n'
>>> yaml.load(OUT, Loader=TestLoader)
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
"""
uuid_regex = r'^\b[a-f0-9]{8}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{12}$'
pattern = re.compile(uuid_regex)
yaml.add_implicit_resolver('!uuid', pattern, Loader=Loader, Dumper=Dumper) | python | def uuid_add_implicit_resolver(Loader=ConfigLoader, Dumper=ConfigDumper):
"""
Attach an implicit pattern resolver for UUID objects.
Tests:
>>> class TestDumper(yaml.SafeDumper): pass
>>> class TestLoader(yaml.SafeLoader): pass
>>> TUUID = 'cc3702ca-699a-4aa6-8226-4c938f294d9b'
>>> IN = {'test': uuid.UUID(TUUID)}
>>> OUT = '{test: cc3702ca-699a-4aa6-8226-4c938f294d9b}'
>>> yaml.add_representer(uuid.UUID, uuid_representer, Dumper=TestDumper)
>>> yaml.add_constructor('!uuid', uuid_constructor, Loader=TestLoader)
>>> uuid_add_implicit_resolver(Loader=TestLoader, Dumper=TestDumper)
>>> yaml.dump(IN, Dumper=TestDumper)
'test: cc3702ca-699a-4aa6-8226-4c938f294d9b\\n'
>>> yaml.load(OUT, Loader=TestLoader)
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
"""
uuid_regex = r'^\b[a-f0-9]{8}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{12}$'
pattern = re.compile(uuid_regex)
yaml.add_implicit_resolver('!uuid', pattern, Loader=Loader, Dumper=Dumper) | [
"def",
"uuid_add_implicit_resolver",
"(",
"Loader",
"=",
"ConfigLoader",
",",
"Dumper",
"=",
"ConfigDumper",
")",
":",
"uuid_regex",
"=",
"r'^\\b[a-f0-9]{8}-\\b[a-f0-9]{4}-\\b[a-f0-9]{4}-\\b[a-f0-9]{4}-\\b[a-f0-9]{12}$'",
"pattern",
"=",
"re",
".",
"compile",
"(",
"uuid_reg... | Attach an implicit pattern resolver for UUID objects.
Tests:
>>> class TestDumper(yaml.SafeDumper): pass
>>> class TestLoader(yaml.SafeLoader): pass
>>> TUUID = 'cc3702ca-699a-4aa6-8226-4c938f294d9b'
>>> IN = {'test': uuid.UUID(TUUID)}
>>> OUT = '{test: cc3702ca-699a-4aa6-8226-4c938f294d9b}'
>>> yaml.add_representer(uuid.UUID, uuid_representer, Dumper=TestDumper)
>>> yaml.add_constructor('!uuid', uuid_constructor, Loader=TestLoader)
>>> uuid_add_implicit_resolver(Loader=TestLoader, Dumper=TestDumper)
>>> yaml.dump(IN, Dumper=TestDumper)
'test: cc3702ca-699a-4aa6-8226-4c938f294d9b\\n'
>>> yaml.load(OUT, Loader=TestLoader)
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')} | [
"Attach",
"an",
"implicit",
"pattern",
"resolver",
"for",
"UUID",
"objects",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L686-L708 | train | 53,649 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | Configuration.store | def store(self, config_file):
""" Store the configuration dictionary to a file."""
selfcopy = copy.deepcopy(self)
selfcopy.filter_exports()
with open(config_file, 'w') as outf:
yaml.dump(
selfcopy.node,
outf,
width=80,
indent=4,
default_flow_style=False,
Dumper=ConfigDumper) | python | def store(self, config_file):
""" Store the configuration dictionary to a file."""
selfcopy = copy.deepcopy(self)
selfcopy.filter_exports()
with open(config_file, 'w') as outf:
yaml.dump(
selfcopy.node,
outf,
width=80,
indent=4,
default_flow_style=False,
Dumper=ConfigDumper) | [
"def",
"store",
"(",
"self",
",",
"config_file",
")",
":",
"selfcopy",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
")",
"selfcopy",
".",
"filter_exports",
"(",
")",
"with",
"open",
"(",
"config_file",
",",
"'w'",
")",
"as",
"outf",
":",
"yaml",
".",
"... | Store the configuration dictionary to a file. | [
"Store",
"the",
"configuration",
"dictionary",
"to",
"a",
"file",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L216-L229 | train | 53,650 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | Configuration.load | def load(self, _from):
"""Load the configuration dictionary from file."""
def load_rec(inode, config):
"""Recursive part of loading."""
for k in config:
if isinstance(config[k], dict) and \
k not in ['value', 'default']:
if k in inode:
load_rec(inode[k], config[k])
else:
LOG.debug("+ config element: '%s'", k)
else:
inode[k] = config[k]
with open(_from, 'r') as infile:
obj = yaml.load(infile, Loader=ConfigLoader)
upgrade(obj)
load_rec(self.node, obj)
self['config_file'] = os.path.abspath(_from) | python | def load(self, _from):
"""Load the configuration dictionary from file."""
def load_rec(inode, config):
"""Recursive part of loading."""
for k in config:
if isinstance(config[k], dict) and \
k not in ['value', 'default']:
if k in inode:
load_rec(inode[k], config[k])
else:
LOG.debug("+ config element: '%s'", k)
else:
inode[k] = config[k]
with open(_from, 'r') as infile:
obj = yaml.load(infile, Loader=ConfigLoader)
upgrade(obj)
load_rec(self.node, obj)
self['config_file'] = os.path.abspath(_from) | [
"def",
"load",
"(",
"self",
",",
"_from",
")",
":",
"def",
"load_rec",
"(",
"inode",
",",
"config",
")",
":",
"\"\"\"Recursive part of loading.\"\"\"",
"for",
"k",
"in",
"config",
":",
"if",
"isinstance",
"(",
"config",
"[",
"k",
"]",
",",
"dict",
")",
... | Load the configuration dictionary from file. | [
"Load",
"the",
"configuration",
"dictionary",
"from",
"file",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L231-L250 | train | 53,651 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | Configuration.init_from_env | def init_from_env(self):
"""
Initialize this node from environment.
If we're a leaf node, i.e., a node containing a dictionary that
consist of a 'default' key, compute our env variable and initialize
our value from the environment.
Otherwise, init our children.
"""
if 'default' in self.node:
env_var = self.__to_env_var__().upper()
if self.has_value():
env_val = self.node['value']
else:
env_val = self.node['default']
env_val = os.getenv(env_var, to_yaml(env_val))
try:
self.node['value'] = yaml.load(
str(env_val), Loader=ConfigLoader)
except ValueError:
self.node['value'] = env_val
else:
if isinstance(self.node, dict):
for k in self.node:
self[k].init_from_env() | python | def init_from_env(self):
"""
Initialize this node from environment.
If we're a leaf node, i.e., a node containing a dictionary that
consist of a 'default' key, compute our env variable and initialize
our value from the environment.
Otherwise, init our children.
"""
if 'default' in self.node:
env_var = self.__to_env_var__().upper()
if self.has_value():
env_val = self.node['value']
else:
env_val = self.node['default']
env_val = os.getenv(env_var, to_yaml(env_val))
try:
self.node['value'] = yaml.load(
str(env_val), Loader=ConfigLoader)
except ValueError:
self.node['value'] = env_val
else:
if isinstance(self.node, dict):
for k in self.node:
self[k].init_from_env() | [
"def",
"init_from_env",
"(",
"self",
")",
":",
"if",
"'default'",
"in",
"self",
".",
"node",
":",
"env_var",
"=",
"self",
".",
"__to_env_var__",
"(",
")",
".",
"upper",
"(",
")",
"if",
"self",
".",
"has_value",
"(",
")",
":",
"env_val",
"=",
"self",
... | Initialize this node from environment.
If we're a leaf node, i.e., a node containing a dictionary that
consist of a 'default' key, compute our env variable and initialize
our value from the environment.
Otherwise, init our children. | [
"Initialize",
"this",
"node",
"from",
"environment",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L264-L289 | train | 53,652 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | Configuration.value | def value(self):
"""
Return the node value, if we're a leaf node.
Examples:
>>> c = Configuration("test")
>>> c['x'] = { "y" : { "value" : None }, "z" : { "value" : 2 }}
>>> c['x']['y'].value == None
True
>>> c['x']['z'].value
2
>>> c['x'].value
TEST_X_Y=null
TEST_X_Z=2
"""
def validate(node_value):
if hasattr(node_value, 'validate'):
node_value.validate()
return node_value
if 'value' in self.node:
return validate(self.node['value'])
return self | python | def value(self):
"""
Return the node value, if we're a leaf node.
Examples:
>>> c = Configuration("test")
>>> c['x'] = { "y" : { "value" : None }, "z" : { "value" : 2 }}
>>> c['x']['y'].value == None
True
>>> c['x']['z'].value
2
>>> c['x'].value
TEST_X_Y=null
TEST_X_Z=2
"""
def validate(node_value):
if hasattr(node_value, 'validate'):
node_value.validate()
return node_value
if 'value' in self.node:
return validate(self.node['value'])
return self | [
"def",
"value",
"(",
"self",
")",
":",
"def",
"validate",
"(",
"node_value",
")",
":",
"if",
"hasattr",
"(",
"node_value",
",",
"'validate'",
")",
":",
"node_value",
".",
"validate",
"(",
")",
"return",
"node_value",
"if",
"'value'",
"in",
"self",
".",
... | Return the node value, if we're a leaf node.
Examples:
>>> c = Configuration("test")
>>> c['x'] = { "y" : { "value" : None }, "z" : { "value" : 2 }}
>>> c['x']['y'].value == None
True
>>> c['x']['z'].value
2
>>> c['x'].value
TEST_X_Y=null
TEST_X_Z=2 | [
"Return",
"the",
"node",
"value",
"if",
"we",
"re",
"a",
"leaf",
"node",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L292-L316 | train | 53,653 |
PolyJIT/benchbuild | benchbuild/utils/settings.py | Configuration.to_env_dict | def to_env_dict(self):
"""Convert configuration object to a flat dictionary."""
entries = {}
if self.has_value():
return {self.__to_env_var__(): self.node['value']}
if self.has_default():
return {self.__to_env_var__(): self.node['default']}
for k in self.node:
entries.update(self[k].to_env_dict())
return entries | python | def to_env_dict(self):
"""Convert configuration object to a flat dictionary."""
entries = {}
if self.has_value():
return {self.__to_env_var__(): self.node['value']}
if self.has_default():
return {self.__to_env_var__(): self.node['default']}
for k in self.node:
entries.update(self[k].to_env_dict())
return entries | [
"def",
"to_env_dict",
"(",
"self",
")",
":",
"entries",
"=",
"{",
"}",
"if",
"self",
".",
"has_value",
"(",
")",
":",
"return",
"{",
"self",
".",
"__to_env_var__",
"(",
")",
":",
"self",
".",
"node",
"[",
"'value'",
"]",
"}",
"if",
"self",
".",
"... | Convert configuration object to a flat dictionary. | [
"Convert",
"configuration",
"object",
"to",
"a",
"flat",
"dictionary",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/settings.py#L454-L465 | train | 53,654 |
portfoliome/foil | foil/records.py | rename_keys | def rename_keys(record: Mapping, key_map: Mapping) -> dict:
"""New record with same keys or renamed keys if key found in key_map."""
new_record = dict()
for k, v in record.items():
key = key_map[k] if k in key_map else k
new_record[key] = v
return new_record | python | def rename_keys(record: Mapping, key_map: Mapping) -> dict:
"""New record with same keys or renamed keys if key found in key_map."""
new_record = dict()
for k, v in record.items():
key = key_map[k] if k in key_map else k
new_record[key] = v
return new_record | [
"def",
"rename_keys",
"(",
"record",
":",
"Mapping",
",",
"key_map",
":",
"Mapping",
")",
"->",
"dict",
":",
"new_record",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"record",
".",
"items",
"(",
")",
":",
"key",
"=",
"key_map",
"[",
"k",
... | New record with same keys or renamed keys if key found in key_map. | [
"New",
"record",
"with",
"same",
"keys",
"or",
"renamed",
"keys",
"if",
"key",
"found",
"in",
"key_map",
"."
] | b66d8cf4ab048a387d8c7a033b47e922ed6917d6 | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/records.py#L6-L15 | train | 53,655 |
portfoliome/foil | foil/records.py | replace_keys | def replace_keys(record: Mapping, key_map: Mapping) -> dict:
"""New record with renamed keys including keys only found in key_map."""
return {key_map[k]: v for k, v in record.items() if k in key_map} | python | def replace_keys(record: Mapping, key_map: Mapping) -> dict:
"""New record with renamed keys including keys only found in key_map."""
return {key_map[k]: v for k, v in record.items() if k in key_map} | [
"def",
"replace_keys",
"(",
"record",
":",
"Mapping",
",",
"key_map",
":",
"Mapping",
")",
"->",
"dict",
":",
"return",
"{",
"key_map",
"[",
"k",
"]",
":",
"v",
"for",
"k",
",",
"v",
"in",
"record",
".",
"items",
"(",
")",
"if",
"k",
"in",
"key_m... | New record with renamed keys including keys only found in key_map. | [
"New",
"record",
"with",
"renamed",
"keys",
"including",
"keys",
"only",
"found",
"in",
"key_map",
"."
] | b66d8cf4ab048a387d8c7a033b47e922ed6917d6 | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/records.py#L18-L21 | train | 53,656 |
portfoliome/foil | foil/records.py | inject_nulls | def inject_nulls(data: Mapping, field_names) -> dict:
"""Insert None as value for missing fields."""
record = dict()
for field in field_names:
record[field] = data.get(field, None)
return record | python | def inject_nulls(data: Mapping, field_names) -> dict:
"""Insert None as value for missing fields."""
record = dict()
for field in field_names:
record[field] = data.get(field, None)
return record | [
"def",
"inject_nulls",
"(",
"data",
":",
"Mapping",
",",
"field_names",
")",
"->",
"dict",
":",
"record",
"=",
"dict",
"(",
")",
"for",
"field",
"in",
"field_names",
":",
"record",
"[",
"field",
"]",
"=",
"data",
".",
"get",
"(",
"field",
",",
"None"... | Insert None as value for missing fields. | [
"Insert",
"None",
"as",
"value",
"for",
"missing",
"fields",
"."
] | b66d8cf4ab048a387d8c7a033b47e922ed6917d6 | https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/records.py#L24-L32 | train | 53,657 |
PolyJIT/benchbuild | benchbuild/likwid.py | read_struct | def read_struct(fstream):
"""
Read a likwid struct from the text stream.
Args:
fstream: Likwid's filestream.
Returns (dict(str: str)):
A dict containing all likwid's struct info as key/value pairs.
"""
line = fstream.readline().strip()
fragments = line.split(",")
fragments = [x for x in fragments if x is not None]
partition = dict()
if not len(fragments) >= 3:
return None
partition["struct"] = fragments[0]
partition["info"] = fragments[1]
partition["num_lines"] = fragments[2]
struct = None
if partition is not None and partition["struct"] == "STRUCT":
num_lines = int(partition["num_lines"].strip())
struct = {}
for _ in range(num_lines):
cols = fetch_cols(fstream)
struct.update({cols[0]: cols[1:]})
return struct | python | def read_struct(fstream):
"""
Read a likwid struct from the text stream.
Args:
fstream: Likwid's filestream.
Returns (dict(str: str)):
A dict containing all likwid's struct info as key/value pairs.
"""
line = fstream.readline().strip()
fragments = line.split(",")
fragments = [x for x in fragments if x is not None]
partition = dict()
if not len(fragments) >= 3:
return None
partition["struct"] = fragments[0]
partition["info"] = fragments[1]
partition["num_lines"] = fragments[2]
struct = None
if partition is not None and partition["struct"] == "STRUCT":
num_lines = int(partition["num_lines"].strip())
struct = {}
for _ in range(num_lines):
cols = fetch_cols(fstream)
struct.update({cols[0]: cols[1:]})
return struct | [
"def",
"read_struct",
"(",
"fstream",
")",
":",
"line",
"=",
"fstream",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"fragments",
"=",
"line",
".",
"split",
"(",
"\",\"",
")",
"fragments",
"=",
"[",
"x",
"for",
"x",
"in",
"fragments",
"if",
"... | Read a likwid struct from the text stream.
Args:
fstream: Likwid's filestream.
Returns (dict(str: str)):
A dict containing all likwid's struct info as key/value pairs. | [
"Read",
"a",
"likwid",
"struct",
"from",
"the",
"text",
"stream",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L23-L51 | train | 53,658 |
PolyJIT/benchbuild | benchbuild/likwid.py | read_table | def read_table(fstream):
"""
Read a likwid table info from the text stream.
Args:
fstream: Likwid's filestream.
Returns (dict(str: str)):
A dict containing likwid's table info as key/value pairs.
"""
pos = fstream.tell()
line = fstream.readline().strip()
fragments = line.split(",")
fragments = [x for x in fragments if x is not None]
partition = dict()
if not len(fragments) >= 4:
return None
partition["table"] = fragments[0]
partition["group"] = fragments[1]
partition["set"] = fragments[2]
partition["num_lines"] = fragments[3]
struct = None
if partition is not None and partition["table"] == "TABLE":
num_lines = int(partition["num_lines"].strip())
struct = {}
header = fetch_cols(fstream)
struct.update({header[0]: header[1:]})
for _ in range(num_lines):
cols = fetch_cols(fstream)
struct.update({cols[0]: cols[1:]})
else:
fstream.seek(pos)
return struct | python | def read_table(fstream):
"""
Read a likwid table info from the text stream.
Args:
fstream: Likwid's filestream.
Returns (dict(str: str)):
A dict containing likwid's table info as key/value pairs.
"""
pos = fstream.tell()
line = fstream.readline().strip()
fragments = line.split(",")
fragments = [x for x in fragments if x is not None]
partition = dict()
if not len(fragments) >= 4:
return None
partition["table"] = fragments[0]
partition["group"] = fragments[1]
partition["set"] = fragments[2]
partition["num_lines"] = fragments[3]
struct = None
if partition is not None and partition["table"] == "TABLE":
num_lines = int(partition["num_lines"].strip())
struct = {}
header = fetch_cols(fstream)
struct.update({header[0]: header[1:]})
for _ in range(num_lines):
cols = fetch_cols(fstream)
struct.update({cols[0]: cols[1:]})
else:
fstream.seek(pos)
return struct | [
"def",
"read_table",
"(",
"fstream",
")",
":",
"pos",
"=",
"fstream",
".",
"tell",
"(",
")",
"line",
"=",
"fstream",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"fragments",
"=",
"line",
".",
"split",
"(",
"\",\"",
")",
"fragments",
"=",
"["... | Read a likwid table info from the text stream.
Args:
fstream: Likwid's filestream.
Returns (dict(str: str)):
A dict containing likwid's table info as key/value pairs. | [
"Read",
"a",
"likwid",
"table",
"info",
"from",
"the",
"text",
"stream",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L54-L90 | train | 53,659 |
PolyJIT/benchbuild | benchbuild/likwid.py | read_structs | def read_structs(fstream):
"""
Read all structs from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all structs in the
fstream.
"""
struct = read_struct(fstream)
while struct is not None:
yield struct
struct = read_struct(fstream) | python | def read_structs(fstream):
"""
Read all structs from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all structs in the
fstream.
"""
struct = read_struct(fstream)
while struct is not None:
yield struct
struct = read_struct(fstream) | [
"def",
"read_structs",
"(",
"fstream",
")",
":",
"struct",
"=",
"read_struct",
"(",
"fstream",
")",
"while",
"struct",
"is",
"not",
"None",
":",
"yield",
"struct",
"struct",
"=",
"read_struct",
"(",
"fstream",
")"
] | Read all structs from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all structs in the
fstream. | [
"Read",
"all",
"structs",
"from",
"likwid",
"s",
"file",
"stream",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L93-L107 | train | 53,660 |
PolyJIT/benchbuild | benchbuild/likwid.py | read_tables | def read_tables(fstream):
"""
Read all tables from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all tables in the fstream.
"""
table = read_table(fstream)
while table is not None:
yield table
table = read_table(fstream) | python | def read_tables(fstream):
"""
Read all tables from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all tables in the fstream.
"""
table = read_table(fstream)
while table is not None:
yield table
table = read_table(fstream) | [
"def",
"read_tables",
"(",
"fstream",
")",
":",
"table",
"=",
"read_table",
"(",
"fstream",
")",
"while",
"table",
"is",
"not",
"None",
":",
"yield",
"table",
"table",
"=",
"read_table",
"(",
"fstream",
")"
] | Read all tables from likwid's file stream.
Args:
fstream: Likwid's output file stream.
Returns:
A generator that can be used to iterate over all tables in the fstream. | [
"Read",
"all",
"tables",
"from",
"likwid",
"s",
"file",
"stream",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L110-L123 | train | 53,661 |
PolyJIT/benchbuild | benchbuild/likwid.py | get_measurements | def get_measurements(region, core_info, data, extra_offset=0):
"""
Get the complete measurement info from likwid's region info.
Args:
region: The region we took a measurement in.
core_info: The core information.
data: The raw data.
extra_offset (int): default = 0
Returns (list((region, metric, core, value))):
A list of measurement tuples, a tuple contains the information about
the region, the metric, the core and the actual value.
"""
measurements = []
clean_core_info = [x for x in core_info if x]
cores = len(clean_core_info)
for k in data:
if k not in ["1", "Region Info", "Event", "Metric", "CPU clock"]:
slot = data[k]
for i in range(cores):
core = core_info[i]
idx = extra_offset + i
if core and slot[idx]:
measurements.append((region, k, core, slot[idx]))
return measurements | python | def get_measurements(region, core_info, data, extra_offset=0):
"""
Get the complete measurement info from likwid's region info.
Args:
region: The region we took a measurement in.
core_info: The core information.
data: The raw data.
extra_offset (int): default = 0
Returns (list((region, metric, core, value))):
A list of measurement tuples, a tuple contains the information about
the region, the metric, the core and the actual value.
"""
measurements = []
clean_core_info = [x for x in core_info if x]
cores = len(clean_core_info)
for k in data:
if k not in ["1", "Region Info", "Event", "Metric", "CPU clock"]:
slot = data[k]
for i in range(cores):
core = core_info[i]
idx = extra_offset + i
if core and slot[idx]:
measurements.append((region, k, core, slot[idx]))
return measurements | [
"def",
"get_measurements",
"(",
"region",
",",
"core_info",
",",
"data",
",",
"extra_offset",
"=",
"0",
")",
":",
"measurements",
"=",
"[",
"]",
"clean_core_info",
"=",
"[",
"x",
"for",
"x",
"in",
"core_info",
"if",
"x",
"]",
"cores",
"=",
"len",
"(",
... | Get the complete measurement info from likwid's region info.
Args:
region: The region we took a measurement in.
core_info: The core information.
data: The raw data.
extra_offset (int): default = 0
Returns (list((region, metric, core, value))):
A list of measurement tuples, a tuple contains the information about
the region, the metric, the core and the actual value. | [
"Get",
"the",
"complete",
"measurement",
"info",
"from",
"likwid",
"s",
"region",
"info",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L126-L152 | train | 53,662 |
PolyJIT/benchbuild | benchbuild/likwid.py | perfcounters | def perfcounters(infile):
"""
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
"""
measurements = []
with open(infile, 'r') as in_file:
read_struct(in_file)
for region_struct in read_structs(in_file):
region = region_struct["1"][1]
core_info = region_struct["Region Info"]
measurements += \
get_measurements(region, core_info, region_struct)
for table_struct in read_tables(in_file):
core_info = None
if "Event" in table_struct:
offset = 1
core_info = table_struct["Event"][offset:]
measurements += get_measurements(region, core_info,
table_struct, offset)
elif "Metric" in table_struct:
core_info = table_struct["Metric"]
measurements += get_measurements(region, core_info,
table_struct)
return measurements | python | def perfcounters(infile):
"""
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
"""
measurements = []
with open(infile, 'r') as in_file:
read_struct(in_file)
for region_struct in read_structs(in_file):
region = region_struct["1"][1]
core_info = region_struct["Region Info"]
measurements += \
get_measurements(region, core_info, region_struct)
for table_struct in read_tables(in_file):
core_info = None
if "Event" in table_struct:
offset = 1
core_info = table_struct["Event"][offset:]
measurements += get_measurements(region, core_info,
table_struct, offset)
elif "Metric" in table_struct:
core_info = table_struct["Metric"]
measurements += get_measurements(region, core_info,
table_struct)
return measurements | [
"def",
"perfcounters",
"(",
"infile",
")",
":",
"measurements",
"=",
"[",
"]",
"with",
"open",
"(",
"infile",
",",
"'r'",
")",
"as",
"in_file",
":",
"read_struct",
"(",
"in_file",
")",
"for",
"region_struct",
"in",
"read_structs",
"(",
"in_file",
")",
":... | Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream. | [
"Get",
"a",
"complete",
"list",
"of",
"all",
"measurements",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/likwid.py#L155-L185 | train | 53,663 |
BlueBrain/hpcbench | hpcbench/cli/__init__.py | cli_common | def cli_common(doc, **kwargs):
"""Program initialization for all provided executables
"""
arguments = docopt(doc, version='hpcbench ' + __version__, **kwargs)
setup_logger(arguments['-v'], arguments['--log'])
load_components()
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('PS')
return arguments | python | def cli_common(doc, **kwargs):
"""Program initialization for all provided executables
"""
arguments = docopt(doc, version='hpcbench ' + __version__, **kwargs)
setup_logger(arguments['-v'], arguments['--log'])
load_components()
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('PS')
return arguments | [
"def",
"cli_common",
"(",
"doc",
",",
"*",
"*",
"kwargs",
")",
":",
"arguments",
"=",
"docopt",
"(",
"doc",
",",
"version",
"=",
"'hpcbench '",
"+",
"__version__",
",",
"*",
"*",
"kwargs",
")",
"setup_logger",
"(",
"arguments",
"[",
"'-v'",
"]",
",",
... | Program initialization for all provided executables | [
"Program",
"initialization",
"for",
"all",
"provided",
"executables"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/cli/__init__.py#L28-L40 | train | 53,664 |
sci-bots/svg-model | svg_model/__init__.py | compute_shape_centers | def compute_shape_centers(df_shapes, shape_i_column, inplace=False):
'''
Compute the center point of each polygon shape, and the offset of each
vertex to the corresponding polygon center point.
Parameters
----------
df_shapes : pandas.DataFrame
Table of polygon shape vertices (one row per vertex).
Must have at least the following columns:
- ``vertex_i``: The index of the vertex within the corresponding
shape.
- ``x``: The x-coordinate of the vertex.
- ``y``: The y-coordinate of the vertex.
shape_i_column : str or list, optional
Table rows with the same value in the :data:`shape_i_column` column are
grouped together as a shape.
in_place : bool, optional
If ``True``, center coordinate columns are added directly to the input
frame.
Otherwise, center coordinate columns are added to copy of the input
frame.
Returns
-------
pandas.DataFrame
Input frame with the following additional columns:
- ``x_center``/``y_center``: Absolute coordinates of shape center.
- ``x_center_offset``/``y_center_offset``:
* Coordinates of each vertex coordinate relative to shape center.
'''
if not isinstance(shape_i_column, bytes):
raise KeyError('Shape index must be a single column.')
if not inplace:
df_shapes = df_shapes.copy()
# Get coordinates of center of each path.
df_bounding_boxes = get_bounding_boxes(df_shapes, shape_i_column)
path_centers = (df_bounding_boxes[['x', 'y']] + .5 *
df_bounding_boxes[['width', 'height']].values)
df_shapes['x_center'] = path_centers.x[df_shapes[shape_i_column]].values
df_shapes['y_center'] = path_centers.y[df_shapes[shape_i_column]].values
# Calculate coordinate of each path vertex relative to center point of
# path.
center_offset = (df_shapes[['x', 'y']] -
df_shapes[['x_center', 'y_center']].values)
return df_shapes.join(center_offset, rsuffix='_center_offset') | python | def compute_shape_centers(df_shapes, shape_i_column, inplace=False):
'''
Compute the center point of each polygon shape, and the offset of each
vertex to the corresponding polygon center point.
Parameters
----------
df_shapes : pandas.DataFrame
Table of polygon shape vertices (one row per vertex).
Must have at least the following columns:
- ``vertex_i``: The index of the vertex within the corresponding
shape.
- ``x``: The x-coordinate of the vertex.
- ``y``: The y-coordinate of the vertex.
shape_i_column : str or list, optional
Table rows with the same value in the :data:`shape_i_column` column are
grouped together as a shape.
in_place : bool, optional
If ``True``, center coordinate columns are added directly to the input
frame.
Otherwise, center coordinate columns are added to copy of the input
frame.
Returns
-------
pandas.DataFrame
Input frame with the following additional columns:
- ``x_center``/``y_center``: Absolute coordinates of shape center.
- ``x_center_offset``/``y_center_offset``:
* Coordinates of each vertex coordinate relative to shape center.
'''
if not isinstance(shape_i_column, bytes):
raise KeyError('Shape index must be a single column.')
if not inplace:
df_shapes = df_shapes.copy()
# Get coordinates of center of each path.
df_bounding_boxes = get_bounding_boxes(df_shapes, shape_i_column)
path_centers = (df_bounding_boxes[['x', 'y']] + .5 *
df_bounding_boxes[['width', 'height']].values)
df_shapes['x_center'] = path_centers.x[df_shapes[shape_i_column]].values
df_shapes['y_center'] = path_centers.y[df_shapes[shape_i_column]].values
# Calculate coordinate of each path vertex relative to center point of
# path.
center_offset = (df_shapes[['x', 'y']] -
df_shapes[['x_center', 'y_center']].values)
return df_shapes.join(center_offset, rsuffix='_center_offset') | [
"def",
"compute_shape_centers",
"(",
"df_shapes",
",",
"shape_i_column",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"shape_i_column",
",",
"bytes",
")",
":",
"raise",
"KeyError",
"(",
"'Shape index must be a single column.'",
")",
"if"... | Compute the center point of each polygon shape, and the offset of each
vertex to the corresponding polygon center point.
Parameters
----------
df_shapes : pandas.DataFrame
Table of polygon shape vertices (one row per vertex).
Must have at least the following columns:
- ``vertex_i``: The index of the vertex within the corresponding
shape.
- ``x``: The x-coordinate of the vertex.
- ``y``: The y-coordinate of the vertex.
shape_i_column : str or list, optional
Table rows with the same value in the :data:`shape_i_column` column are
grouped together as a shape.
in_place : bool, optional
If ``True``, center coordinate columns are added directly to the input
frame.
Otherwise, center coordinate columns are added to copy of the input
frame.
Returns
-------
pandas.DataFrame
Input frame with the following additional columns:
- ``x_center``/``y_center``: Absolute coordinates of shape center.
- ``x_center_offset``/``y_center_offset``:
* Coordinates of each vertex coordinate relative to shape center. | [
"Compute",
"the",
"center",
"point",
"of",
"each",
"polygon",
"shape",
"and",
"the",
"offset",
"of",
"each",
"vertex",
"to",
"the",
"corresponding",
"polygon",
"center",
"point",
"."
] | 2d119650f995e62b29ce0b3151a23f3b957cb072 | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/__init__.py#L207-L257 | train | 53,665 |
Capitains/Nautilus | capitains_nautilus/cts/collections.py | SparqlXmlCitation.refsDecl | def refsDecl(self):
""" ResfDecl expression of the citation scheme
:rtype: str
:Example: /tei:TEI/tei:text/tei:body/tei:div//tei:l[@n='$1']
"""
for refsDecl in self.graph.objects(self.asNode(), RDF_NAMESPACES.TEI.replacementPattern):
return str(refsDecl) | python | def refsDecl(self):
""" ResfDecl expression of the citation scheme
:rtype: str
:Example: /tei:TEI/tei:text/tei:body/tei:div//tei:l[@n='$1']
"""
for refsDecl in self.graph.objects(self.asNode(), RDF_NAMESPACES.TEI.replacementPattern):
return str(refsDecl) | [
"def",
"refsDecl",
"(",
"self",
")",
":",
"for",
"refsDecl",
"in",
"self",
".",
"graph",
".",
"objects",
"(",
"self",
".",
"asNode",
"(",
")",
",",
"RDF_NAMESPACES",
".",
"TEI",
".",
"replacementPattern",
")",
":",
"return",
"str",
"(",
"refsDecl",
")"... | ResfDecl expression of the citation scheme
:rtype: str
:Example: /tei:TEI/tei:text/tei:body/tei:div//tei:l[@n='$1'] | [
"ResfDecl",
"expression",
"of",
"the",
"citation",
"scheme"
] | 6be453fe0cc0e2c1b89ff06e5af1409165fc1411 | https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/cts/collections.py#L108-L115 | train | 53,666 |
KelSolaar/Manager | manager/components_manager.py | Profile.initializeProfile | def initializeProfile(self):
"""
Initializes the Component Profile.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Building '{0}' profile.".format(self.__file))
sections_file_parser = SectionsFileParser(self.__file)
sections_file_parser.parse()
if sections_file_parser.sections:
fileStructureParsingError = lambda attribute: foundations.exceptions.FileStructureParsingError(
"{0} | No '{1}' attribute found, '{2}' file structure seems invalid!".format(
self.__class__.__name__, attribute, self.__file))
self.__directory = os.path.dirname(self.__file)
self.__name = sections_file_parser.get_value("Name", "Component", default=None)
if self.__name is None:
raise fileStructureParsingError("Name")
self.__title = sections_file_parser.get_value("Title", "Component", default=None)
if self.__title is None:
self.__title = self.__name
self.__package = sections_file_parser.get_value("Module", "Component", default=None)
if self.__package is None:
raise fileStructureParsingError("Module")
self.__attribute = sections_file_parser.get_value("Object", "Component", default=None)
if self.__attribute is None:
raise fileStructureParsingError("Object")
self.__require = sections_file_parser.get_value("Require", "Component", default=None)
self.__require = list() if self.__require is None else self.__require.split("|")
self.__version = sections_file_parser.get_value("Version", "Component", default=None)
if self.__version is None:
raise fileStructureParsingError("Version")
self.__author = sections_file_parser.get_value("Author", "Informations", default=None)
self.__email = sections_file_parser.get_value("Email", "Informations", default=None)
self.__url = sections_file_parser.get_value("Url", "Informations", default=None)
self.__description = sections_file_parser.get_value("Description", "Informations", default=None)
return True
else:
raise foundations.exceptions.FileStructureParsingError(
"{0} | No sections found, '{1}' file structure seems invalid!".format(self.__class__.__name__,
self.__file)) | python | def initializeProfile(self):
"""
Initializes the Component Profile.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Building '{0}' profile.".format(self.__file))
sections_file_parser = SectionsFileParser(self.__file)
sections_file_parser.parse()
if sections_file_parser.sections:
fileStructureParsingError = lambda attribute: foundations.exceptions.FileStructureParsingError(
"{0} | No '{1}' attribute found, '{2}' file structure seems invalid!".format(
self.__class__.__name__, attribute, self.__file))
self.__directory = os.path.dirname(self.__file)
self.__name = sections_file_parser.get_value("Name", "Component", default=None)
if self.__name is None:
raise fileStructureParsingError("Name")
self.__title = sections_file_parser.get_value("Title", "Component", default=None)
if self.__title is None:
self.__title = self.__name
self.__package = sections_file_parser.get_value("Module", "Component", default=None)
if self.__package is None:
raise fileStructureParsingError("Module")
self.__attribute = sections_file_parser.get_value("Object", "Component", default=None)
if self.__attribute is None:
raise fileStructureParsingError("Object")
self.__require = sections_file_parser.get_value("Require", "Component", default=None)
self.__require = list() if self.__require is None else self.__require.split("|")
self.__version = sections_file_parser.get_value("Version", "Component", default=None)
if self.__version is None:
raise fileStructureParsingError("Version")
self.__author = sections_file_parser.get_value("Author", "Informations", default=None)
self.__email = sections_file_parser.get_value("Email", "Informations", default=None)
self.__url = sections_file_parser.get_value("Url", "Informations", default=None)
self.__description = sections_file_parser.get_value("Description", "Informations", default=None)
return True
else:
raise foundations.exceptions.FileStructureParsingError(
"{0} | No sections found, '{1}' file structure seems invalid!".format(self.__class__.__name__,
self.__file)) | [
"def",
"initializeProfile",
"(",
"self",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"> Building '{0}' profile.\"",
".",
"format",
"(",
"self",
".",
"__file",
")",
")",
"sections_file_parser",
"=",
"SectionsFileParser",
"(",
"self",
".",
"__file",
")",
"sections_fi... | Initializes the Component Profile.
:return: Method success.
:rtype: bool | [
"Initializes",
"the",
"Component",
"Profile",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L641-L695 | train | 53,667 |
KelSolaar/Manager | manager/components_manager.py | Manager.register_component | def register_component(self, path):
"""
Registers a Component using given path.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.components
{u'core.tests_component_a': <manager.components_manager.Profile object at 0x11c9eb0>}
:param path: Component path.
:type path: unicode
:return: Method success.
:rtype: bool
"""
component = foundations.strings.get_splitext_basename(path)
LOGGER.debug("> Current Component: '{0}'.".format(component))
profile = Profile(file=path)
if profile.initializeProfile():
if os.path.isfile(os.path.join(profile.directory, profile.package) + ".py") or \
os.path.isdir(os.path.join(profile.directory, profile.package)) or \
os.path.basename(profile.directory) == profile.package:
self.__components[profile.name] = profile
return True
else:
raise manager.exceptions.ComponentModuleError(
"{0} | '{1}' has no associated module and has been rejected!".format(self.__class__.__name__,
component))
else:
raise manager.exceptions.ComponentProfileError(
"{0} | '{1}' is not a valid Component and has been rejected!".format(self.__class__.__name__,
component)) | python | def register_component(self, path):
"""
Registers a Component using given path.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.components
{u'core.tests_component_a': <manager.components_manager.Profile object at 0x11c9eb0>}
:param path: Component path.
:type path: unicode
:return: Method success.
:rtype: bool
"""
component = foundations.strings.get_splitext_basename(path)
LOGGER.debug("> Current Component: '{0}'.".format(component))
profile = Profile(file=path)
if profile.initializeProfile():
if os.path.isfile(os.path.join(profile.directory, profile.package) + ".py") or \
os.path.isdir(os.path.join(profile.directory, profile.package)) or \
os.path.basename(profile.directory) == profile.package:
self.__components[profile.name] = profile
return True
else:
raise manager.exceptions.ComponentModuleError(
"{0} | '{1}' has no associated module and has been rejected!".format(self.__class__.__name__,
component))
else:
raise manager.exceptions.ComponentProfileError(
"{0} | '{1}' is not a valid Component and has been rejected!".format(self.__class__.__name__,
component)) | [
"def",
"register_component",
"(",
"self",
",",
"path",
")",
":",
"component",
"=",
"foundations",
".",
"strings",
".",
"get_splitext_basename",
"(",
"path",
")",
"LOGGER",
".",
"debug",
"(",
"\"> Current Component: '{0}'.\"",
".",
"format",
"(",
"component",
")"... | Registers a Component using given path.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.components
{u'core.tests_component_a': <manager.components_manager.Profile object at 0x11c9eb0>}
:param path: Component path.
:type path: unicode
:return: Method success.
:rtype: bool | [
"Registers",
"a",
"Component",
"using",
"given",
"path",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L977-L1011 | train | 53,668 |
KelSolaar/Manager | manager/components_manager.py | Manager.register_components | def register_components(self):
"""
Registers the Components.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.components.keys()
[u'core.tests_component_a', u'core.tests_component_b']
:return: Method success.
:rtype: bool
"""
unregistered_components = []
for path in self.paths:
for file in foundations.walkers.files_walker(path, ("\.{0}$".format(self.__extension),), ("\._",)):
if not self.register_component(file):
unregistered_components.append(file)
if not unregistered_components:
return True
else:
raise manager.exceptions.ComponentRegistrationError(
"{0} | '{1}' Components failed to register!".format(self.__class__.__name__,
", ".join(unregistered_components))) | python | def register_components(self):
"""
Registers the Components.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.components.keys()
[u'core.tests_component_a', u'core.tests_component_b']
:return: Method success.
:rtype: bool
"""
unregistered_components = []
for path in self.paths:
for file in foundations.walkers.files_walker(path, ("\.{0}$".format(self.__extension),), ("\._",)):
if not self.register_component(file):
unregistered_components.append(file)
if not unregistered_components:
return True
else:
raise manager.exceptions.ComponentRegistrationError(
"{0} | '{1}' Components failed to register!".format(self.__class__.__name__,
", ".join(unregistered_components))) | [
"def",
"register_components",
"(",
"self",
")",
":",
"unregistered_components",
"=",
"[",
"]",
"for",
"path",
"in",
"self",
".",
"paths",
":",
"for",
"file",
"in",
"foundations",
".",
"walkers",
".",
"files_walker",
"(",
"path",
",",
"(",
"\"\\.{0}$\"",
".... | Registers the Components.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.components.keys()
[u'core.tests_component_a', u'core.tests_component_b']
:return: Method success.
:rtype: bool | [
"Registers",
"the",
"Components",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1042-L1069 | train | 53,669 |
KelSolaar/Manager | manager/components_manager.py | Manager.instantiate_component | def instantiate_component(self, component, callback=None):
"""
Instantiates given Component.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.instantiate_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5b90>
:param component: Component to instantiate.
:type component: unicode
:param callback: Callback object.
:type callback: object
"""
profile = self.__components[component]
callback and callback(profile)
LOGGER.debug("> Current Component: '{0}'.".format(component))
if os.path.isfile(os.path.join(profile.directory, profile.package) + ".py") or \
os.path.isdir(os.path.join(profile.directory, profile.package)):
path = profile.directory
elif os.path.basename(profile.directory) == profile.package:
path = os.path.join(profile.directory, "..")
not path in sys.path and sys.path.append(path)
profile.module = __import__(profile.package)
object = profile.attribute in profile.module.__dict__ and getattr(profile.module, profile.attribute) or None
if object and inspect.isclass(object):
instance = object(name=profile.name)
for category, type in self.__categories.iteritems():
if type.__name__ in (base.__name__ for base in object.__bases__):
profile.category = category
profile.interface = instance
LOGGER.info("{0} | '{1}' Component has been instantiated!".format(
self.__class__.__name__, profile.name))
return True
else:
del (self.__components[component])
raise manager.exceptions.ComponentInterfaceError(
"{0} | '{1}' Component has no Interface and has been rejected!".format(self.__class__.__name__,
profile.name)) | python | def instantiate_component(self, component, callback=None):
"""
Instantiates given Component.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.instantiate_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5b90>
:param component: Component to instantiate.
:type component: unicode
:param callback: Callback object.
:type callback: object
"""
profile = self.__components[component]
callback and callback(profile)
LOGGER.debug("> Current Component: '{0}'.".format(component))
if os.path.isfile(os.path.join(profile.directory, profile.package) + ".py") or \
os.path.isdir(os.path.join(profile.directory, profile.package)):
path = profile.directory
elif os.path.basename(profile.directory) == profile.package:
path = os.path.join(profile.directory, "..")
not path in sys.path and sys.path.append(path)
profile.module = __import__(profile.package)
object = profile.attribute in profile.module.__dict__ and getattr(profile.module, profile.attribute) or None
if object and inspect.isclass(object):
instance = object(name=profile.name)
for category, type in self.__categories.iteritems():
if type.__name__ in (base.__name__ for base in object.__bases__):
profile.category = category
profile.interface = instance
LOGGER.info("{0} | '{1}' Component has been instantiated!".format(
self.__class__.__name__, profile.name))
return True
else:
del (self.__components[component])
raise manager.exceptions.ComponentInterfaceError(
"{0} | '{1}' Component has no Interface and has been rejected!".format(self.__class__.__name__,
profile.name)) | [
"def",
"instantiate_component",
"(",
"self",
",",
"component",
",",
"callback",
"=",
"None",
")",
":",
"profile",
"=",
"self",
".",
"__components",
"[",
"component",
"]",
"callback",
"and",
"callback",
"(",
"profile",
")",
"LOGGER",
".",
"debug",
"(",
"\">... | Instantiates given Component.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.instantiate_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5b90>
:param component: Component to instantiate.
:type component: unicode
:param callback: Callback object.
:type callback: object | [
"Instantiates",
"given",
"Component",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1098-L1145 | train | 53,670 |
KelSolaar/Manager | manager/components_manager.py | Manager.instantiate_components | def instantiate_components(self, callback=None):
"""
Instantiates the Components.
Usage::
>>> manager = Manager((tests_manager,))
>>> manager.register_components()
True
>>> manager.instantiate_components()
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5bb0>
:param callback: Callback object.
:type callback: object
"""
uninstantiated_components = [component
for component in self.list_components()
if not self.instantiate_component(component, callback)]
if not uninstantiated_components:
return True
else:
raise manager.exceptions.ComponentInstantiationError(
"{0} | '{1}' Components failed to instantiate!".format(self.__class__.__name__,
", ".join(uninstantiated_components))) | python | def instantiate_components(self, callback=None):
"""
Instantiates the Components.
Usage::
>>> manager = Manager((tests_manager,))
>>> manager.register_components()
True
>>> manager.instantiate_components()
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5bb0>
:param callback: Callback object.
:type callback: object
"""
uninstantiated_components = [component
for component in self.list_components()
if not self.instantiate_component(component, callback)]
if not uninstantiated_components:
return True
else:
raise manager.exceptions.ComponentInstantiationError(
"{0} | '{1}' Components failed to instantiate!".format(self.__class__.__name__,
", ".join(uninstantiated_components))) | [
"def",
"instantiate_components",
"(",
"self",
",",
"callback",
"=",
"None",
")",
":",
"uninstantiated_components",
"=",
"[",
"component",
"for",
"component",
"in",
"self",
".",
"list_components",
"(",
")",
"if",
"not",
"self",
".",
"instantiate_component",
"(",
... | Instantiates the Components.
Usage::
>>> manager = Manager((tests_manager,))
>>> manager.register_components()
True
>>> manager.instantiate_components()
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17a5bb0>
:param callback: Callback object.
:type callback: object | [
"Instantiates",
"the",
"Components",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1147-L1173 | train | 53,671 |
KelSolaar/Manager | manager/components_manager.py | Manager.reload_component | def reload_component(self, component):
"""
Reload given Component module.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.instantiate_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b4890>
>>> manager.reload_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component name.
:type component: unicode
:return: Reload success.
:rtype: bool
"""
dependents = list(reversed(self.list_dependents(component)))
dependents.append(component)
for dependent in dependents:
profile = self.__components[dependent]
module = __import__(profile.package)
reload(module)
object = profile.attribute in dir(module) and getattr(module, profile.attribute) or None
if object and inspect.isclass(object):
for type in self.__categories.itervalues():
if type.__name__ in (base.__name__ for base in object.__bases__):
instance = object(name=profile.name)
profile.module = module
profile.interface = instance
LOGGER.info("{0} | '{1}' Component has been reloaded!".format(
self.__class__.__name__, profile.name))
return True | python | def reload_component(self, component):
"""
Reload given Component module.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.instantiate_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b4890>
>>> manager.reload_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component name.
:type component: unicode
:return: Reload success.
:rtype: bool
"""
dependents = list(reversed(self.list_dependents(component)))
dependents.append(component)
for dependent in dependents:
profile = self.__components[dependent]
module = __import__(profile.package)
reload(module)
object = profile.attribute in dir(module) and getattr(module, profile.attribute) or None
if object and inspect.isclass(object):
for type in self.__categories.itervalues():
if type.__name__ in (base.__name__ for base in object.__bases__):
instance = object(name=profile.name)
profile.module = module
profile.interface = instance
LOGGER.info("{0} | '{1}' Component has been reloaded!".format(
self.__class__.__name__, profile.name))
return True | [
"def",
"reload_component",
"(",
"self",
",",
"component",
")",
":",
"dependents",
"=",
"list",
"(",
"reversed",
"(",
"self",
".",
"list_dependents",
"(",
"component",
")",
")",
")",
"dependents",
".",
"append",
"(",
"component",
")",
"for",
"dependent",
"i... | Reload given Component module.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.instantiate_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b4890>
>>> manager.reload_component("core.tests_component_a")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component name.
:type component: unicode
:return: Reload success.
:rtype: bool | [
"Reload",
"given",
"Component",
"module",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1175-L1215 | train | 53,672 |
KelSolaar/Manager | manager/components_manager.py | Manager.list_components | def list_components(self, dependency_order=True):
"""
Lists the Components by dependency resolving.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool
"""
if dependency_order:
return list(itertools.chain.from_iterable([sorted(list(batch)) for batch in
foundations.common.dependency_resolver(
dict((key, value.require) for (key, value) in self))]))
else:
return [key for (key, value) in self] | python | def list_components(self, dependency_order=True):
"""
Lists the Components by dependency resolving.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool
"""
if dependency_order:
return list(itertools.chain.from_iterable([sorted(list(batch)) for batch in
foundations.common.dependency_resolver(
dict((key, value.require) for (key, value) in self))]))
else:
return [key for (key, value) in self] | [
"def",
"list_components",
"(",
"self",
",",
"dependency_order",
"=",
"True",
")",
":",
"if",
"dependency_order",
":",
"return",
"list",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"[",
"sorted",
"(",
"list",
"(",
"batch",
")",
")",
"for",
... | Lists the Components by dependency resolving.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_components()
[u'core.tests_component_a', u'core.tests_component_b']
:param dependency_order: Components are returned by dependency order.
:type dependency_order: bool | [
"Lists",
"the",
"Components",
"by",
"dependency",
"resolving",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1217-L1238 | train | 53,673 |
KelSolaar/Manager | manager/components_manager.py | Manager.list_dependents | def list_dependents(self, component, dependents=None):
"""
Lists given Component dependents Components.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_dependents("core.tests_component_a")
[u'core.tests_component_b']
:param component: Component to retrieve the dependents Components.
:type component: unicode
:param dependents: Component dependents Components.
:type dependents: set
:return: Dependent Components.
:rtype: list
"""
dependents = set() if dependents is None else dependents
for name, profile in self:
if not component in profile.require:
continue
dependents.add(name)
self.list_dependents(name, dependents)
return sorted(list(dependents), key=(self.list_components()).index) | python | def list_dependents(self, component, dependents=None):
"""
Lists given Component dependents Components.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_dependents("core.tests_component_a")
[u'core.tests_component_b']
:param component: Component to retrieve the dependents Components.
:type component: unicode
:param dependents: Component dependents Components.
:type dependents: set
:return: Dependent Components.
:rtype: list
"""
dependents = set() if dependents is None else dependents
for name, profile in self:
if not component in profile.require:
continue
dependents.add(name)
self.list_dependents(name, dependents)
return sorted(list(dependents), key=(self.list_components()).index) | [
"def",
"list_dependents",
"(",
"self",
",",
"component",
",",
"dependents",
"=",
"None",
")",
":",
"dependents",
"=",
"set",
"(",
")",
"if",
"dependents",
"is",
"None",
"else",
"dependents",
"for",
"name",
",",
"profile",
"in",
"self",
":",
"if",
"not",
... | Lists given Component dependents Components.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.list_dependents("core.tests_component_a")
[u'core.tests_component_b']
:param component: Component to retrieve the dependents Components.
:type component: unicode
:param dependents: Component dependents Components.
:type dependents: set
:return: Dependent Components.
:rtype: list | [
"Lists",
"given",
"Component",
"dependents",
"Components",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1240-L1268 | train | 53,674 |
KelSolaar/Manager | manager/components_manager.py | Manager.filter_components | def filter_components(self, pattern, category=None):
"""
Filters the Components using given regex pattern.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.filter_components("\w+A$")
[u'core.tests_component_a']
:param pattern: Regex filtering pattern.
:type pattern: unicode
:param category: Category filter.
:type category: unicode
:return: Matching Components.
:rtype: list
"""
filtered_components = []
for component, profile in self:
if category:
if profile.category != category:
continue
if re.search(pattern, component):
filtered_components.append(component)
return filtered_components | python | def filter_components(self, pattern, category=None):
"""
Filters the Components using given regex pattern.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.filter_components("\w+A$")
[u'core.tests_component_a']
:param pattern: Regex filtering pattern.
:type pattern: unicode
:param category: Category filter.
:type category: unicode
:return: Matching Components.
:rtype: list
"""
filtered_components = []
for component, profile in self:
if category:
if profile.category != category:
continue
if re.search(pattern, component):
filtered_components.append(component)
return filtered_components | [
"def",
"filter_components",
"(",
"self",
",",
"pattern",
",",
"category",
"=",
"None",
")",
":",
"filtered_components",
"=",
"[",
"]",
"for",
"component",
",",
"profile",
"in",
"self",
":",
"if",
"category",
":",
"if",
"profile",
".",
"category",
"!=",
"... | Filters the Components using given regex pattern.
Usage::
>>> manager = Manager(("./manager/tests/tests_manager/resources/components/core",))
>>> manager.register_components()
True
>>> manager.filter_components("\w+A$")
[u'core.tests_component_a']
:param pattern: Regex filtering pattern.
:type pattern: unicode
:param category: Category filter.
:type category: unicode
:return: Matching Components.
:rtype: list | [
"Filters",
"the",
"Components",
"using",
"given",
"regex",
"pattern",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1270-L1298 | train | 53,675 |
KelSolaar/Manager | manager/components_manager.py | Manager.get_profile | def get_profile(self, component):
"""
Gets given Component profile.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_profile("core.tests_component_a")
<manager.components_manager.Profile object at 0x10258ef10>
:param component: Component to get the profile.
:type component: unicode
:return: Component profile.
:rtype: Profile
"""
components = self.filter_components(r"^{0}$".format(component))
if components != []:
return self.__components[foundations.common.get_first_item(components)] | python | def get_profile(self, component):
"""
Gets given Component profile.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_profile("core.tests_component_a")
<manager.components_manager.Profile object at 0x10258ef10>
:param component: Component to get the profile.
:type component: unicode
:return: Component profile.
:rtype: Profile
"""
components = self.filter_components(r"^{0}$".format(component))
if components != []:
return self.__components[foundations.common.get_first_item(components)] | [
"def",
"get_profile",
"(",
"self",
",",
"component",
")",
":",
"components",
"=",
"self",
".",
"filter_components",
"(",
"r\"^{0}$\"",
".",
"format",
"(",
"component",
")",
")",
"if",
"components",
"!=",
"[",
"]",
":",
"return",
"self",
".",
"__components"... | Gets given Component profile.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_profile("core.tests_component_a")
<manager.components_manager.Profile object at 0x10258ef10>
:param component: Component to get the profile.
:type component: unicode
:return: Component profile.
:rtype: Profile | [
"Gets",
"given",
"Component",
"profile",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1300-L1320 | train | 53,676 |
KelSolaar/Manager | manager/components_manager.py | Manager.get_interface | def get_interface(self, component):
"""
Gets given Component interface.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component to get the interface.
:type component: unicode
:return: Component interface.
:rtype: object
"""
profile = self.get_profile(component)
if profile:
return profile.interface | python | def get_interface(self, component):
"""
Gets given Component interface.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component to get the interface.
:type component: unicode
:return: Component interface.
:rtype: object
"""
profile = self.get_profile(component)
if profile:
return profile.interface | [
"def",
"get_interface",
"(",
"self",
",",
"component",
")",
":",
"profile",
"=",
"self",
".",
"get_profile",
"(",
"component",
")",
"if",
"profile",
":",
"return",
"profile",
".",
"interface"
] | Gets given Component interface.
Usage::
>>> manager = Manager()
>>> manager.register_component("tests_component_a.rc")
True
>>> manager.get_interface("core.tests_component_a")
<tests_component_a.TestsComponentA object at 0x17b0d70>
:param component: Component to get the interface.
:type component: unicode
:return: Component interface.
:rtype: object | [
"Gets",
"given",
"Component",
"interface",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1322-L1342 | train | 53,677 |
KelSolaar/Manager | manager/components_manager.py | Manager.get_component_attribute_name | def get_component_attribute_name(component):
"""
Gets given Component attribute name.
Usage::
>>> Manager.get_component_attribute_name("factory.components_manager_ui")
u'factoryComponentsManagerUi'
:param component: Component to get the attribute name.
:type component: unicode
:return: Component attribute name.
:rtype: object
"""
search = re.search(r"(?P<category>\w+)\.(?P<name>\w+)", component)
if search:
name = "{0}{1}{2}".format(
search.group("category"), search.group("name")[0].upper(), search.group("name")[1:])
LOGGER.debug("> Component name: '{0}' to attribute name Active_QLabel: '{1}'.".format(component, name))
else:
name = component
return name | python | def get_component_attribute_name(component):
"""
Gets given Component attribute name.
Usage::
>>> Manager.get_component_attribute_name("factory.components_manager_ui")
u'factoryComponentsManagerUi'
:param component: Component to get the attribute name.
:type component: unicode
:return: Component attribute name.
:rtype: object
"""
search = re.search(r"(?P<category>\w+)\.(?P<name>\w+)", component)
if search:
name = "{0}{1}{2}".format(
search.group("category"), search.group("name")[0].upper(), search.group("name")[1:])
LOGGER.debug("> Component name: '{0}' to attribute name Active_QLabel: '{1}'.".format(component, name))
else:
name = component
return name | [
"def",
"get_component_attribute_name",
"(",
"component",
")",
":",
"search",
"=",
"re",
".",
"search",
"(",
"r\"(?P<category>\\w+)\\.(?P<name>\\w+)\"",
",",
"component",
")",
"if",
"search",
":",
"name",
"=",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"search",
".",
"... | Gets given Component attribute name.
Usage::
>>> Manager.get_component_attribute_name("factory.components_manager_ui")
u'factoryComponentsManagerUi'
:param component: Component to get the attribute name.
:type component: unicode
:return: Component attribute name.
:rtype: object | [
"Gets",
"given",
"Component",
"attribute",
"name",
"."
] | 39c8153fc021fc8a76e345a6e336ec2644f089d1 | https://github.com/KelSolaar/Manager/blob/39c8153fc021fc8a76e345a6e336ec2644f089d1/manager/components_manager.py#L1345-L1367 | train | 53,678 |
eng-tools/sfsimodels | sfsimodels/output.py | output_to_table | def output_to_table(obj, olist='inputs', oformat='latex', table_ends=False, prefix=""):
"""
Compile the properties to a table.
:param olist: list, Names of the parameters to be in the output table
:param oformat: str, The type of table to be output
:param table_ends: bool, Add ends to the table
:param prefix: str, A string to be added to the start of each parameter name
:return: para, str, table as a string
"""
para = ""
property_list = []
if olist == 'inputs':
property_list = obj.inputs
elif olist == 'all':
for item in obj.__dict__:
if "_" != item[0]:
property_list.append(item)
for item in property_list:
if hasattr(obj, item):
value = getattr(obj, item)
value_str = format_value(value)
if oformat == "latex":
delimeter = " & "
else:
delimeter = ","
para += "{0}{1}{2}\\\\\n".format(prefix + format_name(item), delimeter, value_str)
if table_ends:
para = add_table_ends(para, oformat)
return para | python | def output_to_table(obj, olist='inputs', oformat='latex', table_ends=False, prefix=""):
"""
Compile the properties to a table.
:param olist: list, Names of the parameters to be in the output table
:param oformat: str, The type of table to be output
:param table_ends: bool, Add ends to the table
:param prefix: str, A string to be added to the start of each parameter name
:return: para, str, table as a string
"""
para = ""
property_list = []
if olist == 'inputs':
property_list = obj.inputs
elif olist == 'all':
for item in obj.__dict__:
if "_" != item[0]:
property_list.append(item)
for item in property_list:
if hasattr(obj, item):
value = getattr(obj, item)
value_str = format_value(value)
if oformat == "latex":
delimeter = " & "
else:
delimeter = ","
para += "{0}{1}{2}\\\\\n".format(prefix + format_name(item), delimeter, value_str)
if table_ends:
para = add_table_ends(para, oformat)
return para | [
"def",
"output_to_table",
"(",
"obj",
",",
"olist",
"=",
"'inputs'",
",",
"oformat",
"=",
"'latex'",
",",
"table_ends",
"=",
"False",
",",
"prefix",
"=",
"\"\"",
")",
":",
"para",
"=",
"\"\"",
"property_list",
"=",
"[",
"]",
"if",
"olist",
"==",
"'inpu... | Compile the properties to a table.
:param olist: list, Names of the parameters to be in the output table
:param oformat: str, The type of table to be output
:param table_ends: bool, Add ends to the table
:param prefix: str, A string to be added to the start of each parameter name
:return: para, str, table as a string | [
"Compile",
"the",
"properties",
"to",
"a",
"table",
"."
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/output.py#L4-L33 | train | 53,679 |
eng-tools/sfsimodels | sfsimodels/output.py | format_value | def format_value(value, sf=3):
"""
convert a parameter value into a formatted string with certain significant figures
:param value: the value to be formatted
:param sf: number of significant figures
:return: str
"""
if isinstance(value, str):
return value
elif isinstance(value, list) or isinstance(value, np.ndarray):
value = list(value)
for i in range(len(value)):
vv = format_value(value[i])
value[i] = vv
return "[" + ", ".join(value) + "]"
elif value is None:
return "N/A"
else:
fmt_str = "{0:.%ig}" % sf
return fmt_str.format(value) | python | def format_value(value, sf=3):
"""
convert a parameter value into a formatted string with certain significant figures
:param value: the value to be formatted
:param sf: number of significant figures
:return: str
"""
if isinstance(value, str):
return value
elif isinstance(value, list) or isinstance(value, np.ndarray):
value = list(value)
for i in range(len(value)):
vv = format_value(value[i])
value[i] = vv
return "[" + ", ".join(value) + "]"
elif value is None:
return "N/A"
else:
fmt_str = "{0:.%ig}" % sf
return fmt_str.format(value) | [
"def",
"format_value",
"(",
"value",
",",
"sf",
"=",
"3",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
"or",
"isinstance",
"(",
"value",
",",
"np",
".",
"... | convert a parameter value into a formatted string with certain significant figures
:param value: the value to be formatted
:param sf: number of significant figures
:return: str | [
"convert",
"a",
"parameter",
"value",
"into",
"a",
"formatted",
"string",
"with",
"certain",
"significant",
"figures"
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/output.py#L47-L70 | train | 53,680 |
eng-tools/sfsimodels | sfsimodels/output.py | add_table_ends | def add_table_ends(para, oformat='latex', caption="caption-text", label="table"):
"""
Adds the latex table ends
:param para:
:param oformat:
:param caption:
:param label:
:return:
"""
fpara = ""
if oformat == 'latex':
fpara += "\\begin{table}[H]\n"
fpara += "\\centering\n"
fpara += "\\begin{tabular}{cc}\n"
fpara += "\\toprule\n"
fpara += "Parameter & Value \\\\\n"
fpara += "\\midrule\n"
fpara += para
fpara += "\\bottomrule\n"
fpara += "\\end{tabular}\n"
fpara += "\\caption{%s \label{tab:%s}}\n" % (caption, label)
fpara += "\\end{table}\n\n"
return fpara | python | def add_table_ends(para, oformat='latex', caption="caption-text", label="table"):
"""
Adds the latex table ends
:param para:
:param oformat:
:param caption:
:param label:
:return:
"""
fpara = ""
if oformat == 'latex':
fpara += "\\begin{table}[H]\n"
fpara += "\\centering\n"
fpara += "\\begin{tabular}{cc}\n"
fpara += "\\toprule\n"
fpara += "Parameter & Value \\\\\n"
fpara += "\\midrule\n"
fpara += para
fpara += "\\bottomrule\n"
fpara += "\\end{tabular}\n"
fpara += "\\caption{%s \label{tab:%s}}\n" % (caption, label)
fpara += "\\end{table}\n\n"
return fpara | [
"def",
"add_table_ends",
"(",
"para",
",",
"oformat",
"=",
"'latex'",
",",
"caption",
"=",
"\"caption-text\"",
",",
"label",
"=",
"\"table\"",
")",
":",
"fpara",
"=",
"\"\"",
"if",
"oformat",
"==",
"'latex'",
":",
"fpara",
"+=",
"\"\\\\begin{table}[H]\\n\"",
... | Adds the latex table ends
:param para:
:param oformat:
:param caption:
:param label:
:return: | [
"Adds",
"the",
"latex",
"table",
"ends"
] | 65a690ca440d61307f5a9b8478e4704f203a5925 | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/output.py#L73-L96 | train | 53,681 |
sci-bots/svg-model | svg_model/draw.py | draw_shapes_svg_layer | def draw_shapes_svg_layer(df_shapes, shape_i_columns, layer_name,
layer_number=1, use_svg_path=True):
'''
Draw shapes as a layer in a SVG file.
Args:
df_shapes (pandas.DataFrame): Table of shape vertices (one row per
vertex).
shape_i_columns (str or list) : Either a single column name as a string
or a list of column names in ``df_shapes``. Rows in ``df_shapes``
with the same value in the ``shape_i_columns`` column(s) are
grouped together as a shape.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
use_svg_path (bool, optional) : If ``True``, electrodes are drawn as
``svg:path`` elements. Otherwise, electrodes are drawn as
``svg:polygon`` elements.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named according to :data:`layer_name`, which
in turn contains ``svg:polygon`` or ``svg:path`` elements corresponding
to the shapes in the input :data:`df_shapes` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
minx, miny = df_shapes[['x', 'y']].min().values
maxx, maxy = df_shapes[['x', 'y']].max().values
width = maxx - minx
height = maxy - miny
dwg = svgwrite.Drawing('should_not_exist.svg', size=(width, height),
debug=False)
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
svg_root = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
minx, miny = df_shapes[['x', 'y']].min().values
for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns):
attr_columns = [c for c in df_shape_i.columns
if c not in ('vertex_i', 'x', 'y')]
attrs = df_shape_i.iloc[0][attr_columns].to_dict()
vertices = df_shape_i[['x', 'y']].values.tolist()
if not use_svg_path:
# Draw electrode shape as an `svg:polygon` element.
p = Polygon(vertices, debug=False, **attrs)
else:
# Draw electrode shape as an `svg:path` element.
commands = ['M %s,%s' % tuple(vertices[0])]
commands += ['L %s,%s' % tuple(v) for v in vertices[1:]]
while vertices[0] == vertices[-1]:
# Start is equal to end of path, but we will use the `'Z'`
# command to close the path, so delete the last point in the
# path.
del vertices[-1]
commands += ['Z']
p = Path_(d=' '.join(commands), debug=False, **attrs)
svg_root.add(p)
dwg.add(svg_root)
# Write result to `StringIO`.
output = StringIO.StringIO()
dwg.write(output)
output.seek(0)
return output | python | def draw_shapes_svg_layer(df_shapes, shape_i_columns, layer_name,
layer_number=1, use_svg_path=True):
'''
Draw shapes as a layer in a SVG file.
Args:
df_shapes (pandas.DataFrame): Table of shape vertices (one row per
vertex).
shape_i_columns (str or list) : Either a single column name as a string
or a list of column names in ``df_shapes``. Rows in ``df_shapes``
with the same value in the ``shape_i_columns`` column(s) are
grouped together as a shape.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
use_svg_path (bool, optional) : If ``True``, electrodes are drawn as
``svg:path`` elements. Otherwise, electrodes are drawn as
``svg:polygon`` elements.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named according to :data:`layer_name`, which
in turn contains ``svg:polygon`` or ``svg:path`` elements corresponding
to the shapes in the input :data:`df_shapes` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
minx, miny = df_shapes[['x', 'y']].min().values
maxx, maxy = df_shapes[['x', 'y']].max().values
width = maxx - minx
height = maxy - miny
dwg = svgwrite.Drawing('should_not_exist.svg', size=(width, height),
debug=False)
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
svg_root = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
minx, miny = df_shapes[['x', 'y']].min().values
for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns):
attr_columns = [c for c in df_shape_i.columns
if c not in ('vertex_i', 'x', 'y')]
attrs = df_shape_i.iloc[0][attr_columns].to_dict()
vertices = df_shape_i[['x', 'y']].values.tolist()
if not use_svg_path:
# Draw electrode shape as an `svg:polygon` element.
p = Polygon(vertices, debug=False, **attrs)
else:
# Draw electrode shape as an `svg:path` element.
commands = ['M %s,%s' % tuple(vertices[0])]
commands += ['L %s,%s' % tuple(v) for v in vertices[1:]]
while vertices[0] == vertices[-1]:
# Start is equal to end of path, but we will use the `'Z'`
# command to close the path, so delete the last point in the
# path.
del vertices[-1]
commands += ['Z']
p = Path_(d=' '.join(commands), debug=False, **attrs)
svg_root.add(p)
dwg.add(svg_root)
# Write result to `StringIO`.
output = StringIO.StringIO()
dwg.write(output)
output.seek(0)
return output | [
"def",
"draw_shapes_svg_layer",
"(",
"df_shapes",
",",
"shape_i_columns",
",",
"layer_name",
",",
"layer_number",
"=",
"1",
",",
"use_svg_path",
"=",
"True",
")",
":",
"# Note that `svgwrite.Drawing` requires a filepath to be specified during",
"# construction, *but* nothing is... | Draw shapes as a layer in a SVG file.
Args:
df_shapes (pandas.DataFrame): Table of shape vertices (one row per
vertex).
shape_i_columns (str or list) : Either a single column name as a string
or a list of column names in ``df_shapes``. Rows in ``df_shapes``
with the same value in the ``shape_i_columns`` column(s) are
grouped together as a shape.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
use_svg_path (bool, optional) : If ``True``, electrodes are drawn as
``svg:path`` elements. Otherwise, electrodes are drawn as
``svg:polygon`` elements.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named according to :data:`layer_name`, which
in turn contains ``svg:polygon`` or ``svg:path`` elements corresponding
to the shapes in the input :data:`df_shapes` table. | [
"Draw",
"shapes",
"as",
"a",
"layer",
"in",
"a",
"SVG",
"file",
"."
] | 2d119650f995e62b29ce0b3151a23f3b957cb072 | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/draw.py#L13-L92 | train | 53,682 |
sci-bots/svg-model | svg_model/draw.py | draw_lines_svg_layer | def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):
'''
Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)
dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()
dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
coord_columns = ['x_source', 'y_source', 'x_target', 'y_target']
line_layer = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
for i, (x1, y1, x2, y2) in df_endpoints[coord_columns].iterrows():
line_i = dwg.line((x1, y1), (x2, y2), id='line%d' % i,
style='stroke:#000000; stroke-width:0.1;')
line_layer.add(line_i)
dwg.add(line_layer)
output = StringIO.StringIO()
dwg.write(output)
# Rewind file.
output.seek(0)
return output | python | def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):
'''
Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)
dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()
dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
coord_columns = ['x_source', 'y_source', 'x_target', 'y_target']
line_layer = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
for i, (x1, y1, x2, y2) in df_endpoints[coord_columns].iterrows():
line_i = dwg.line((x1, y1), (x2, y2), id='line%d' % i,
style='stroke:#000000; stroke-width:0.1;')
line_layer.add(line_i)
dwg.add(line_layer)
output = StringIO.StringIO()
dwg.write(output)
# Rewind file.
output.seek(0)
return output | [
"def",
"draw_lines_svg_layer",
"(",
"df_endpoints",
",",
"layer_name",
",",
"layer_number",
"=",
"1",
")",
":",
"# Note that `svgwrite.Drawing` requires a filepath to be specified during",
"# construction, *but* nothing is actually written to the path unless one of",
"# the `save*` metho... | Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named ``"Connections"``, which in turn
contains one line per row in the input :data:`df_endpoints` table. | [
"Draw",
"lines",
"defined",
"by",
"endpoint",
"coordinates",
"as",
"a",
"layer",
"in",
"a",
"SVG",
"file",
"."
] | 2d119650f995e62b29ce0b3151a23f3b957cb072 | https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/draw.py#L95-L146 | train | 53,683 |
Capitains/Nautilus | capitains_nautilus/apis/dts.py | DTSApi.dts_error | def dts_error(self, error_name, message=None):
""" Create a DTS Error reply
:param error_name: Name of the error
:param message: Message of the Error
:return: DTS Error Response with information (JSON)
"""
self.nautilus_extension.logger.info("DTS error thrown {} for {} ({})".format(
error_name, request.path, message
))
j = jsonify({
"error": error_name,
"message": message
})
j.status_code = 404
return j | python | def dts_error(self, error_name, message=None):
""" Create a DTS Error reply
:param error_name: Name of the error
:param message: Message of the Error
:return: DTS Error Response with information (JSON)
"""
self.nautilus_extension.logger.info("DTS error thrown {} for {} ({})".format(
error_name, request.path, message
))
j = jsonify({
"error": error_name,
"message": message
})
j.status_code = 404
return j | [
"def",
"dts_error",
"(",
"self",
",",
"error_name",
",",
"message",
"=",
"None",
")",
":",
"self",
".",
"nautilus_extension",
".",
"logger",
".",
"info",
"(",
"\"DTS error thrown {} for {} ({})\"",
".",
"format",
"(",
"error_name",
",",
"request",
".",
"path",... | Create a DTS Error reply
:param error_name: Name of the error
:param message: Message of the Error
:return: DTS Error Response with information (JSON) | [
"Create",
"a",
"DTS",
"Error",
"reply"
] | 6be453fe0cc0e2c1b89ff06e5af1409165fc1411 | https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/apis/dts.py#L24-L39 | train | 53,684 |
Capitains/Nautilus | capitains_nautilus/apis/dts.py | DTSApi.r_dts_collection | def r_dts_collection(self, objectId=None):
""" DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection
"""
try:
j = self.resolver.getMetadata(objectId=objectId).export(Mimetypes.JSON.DTS.Std)
j = jsonify(j)
j.status_code = 200
except NautilusError as E:
return self.dts_error(error_name=E.__class__.__name__, message=E.__doc__)
return j | python | def r_dts_collection(self, objectId=None):
""" DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection
"""
try:
j = self.resolver.getMetadata(objectId=objectId).export(Mimetypes.JSON.DTS.Std)
j = jsonify(j)
j.status_code = 200
except NautilusError as E:
return self.dts_error(error_name=E.__class__.__name__, message=E.__doc__)
return j | [
"def",
"r_dts_collection",
"(",
"self",
",",
"objectId",
"=",
"None",
")",
":",
"try",
":",
"j",
"=",
"self",
".",
"resolver",
".",
"getMetadata",
"(",
"objectId",
"=",
"objectId",
")",
".",
"export",
"(",
"Mimetypes",
".",
"JSON",
".",
"DTS",
".",
"... | DTS Collection Metadata reply for given objectId
:param objectId: Collection Identifier
:return: JSON Format of DTS Collection | [
"DTS",
"Collection",
"Metadata",
"reply",
"for",
"given",
"objectId"
] | 6be453fe0cc0e2c1b89ff06e5af1409165fc1411 | https://github.com/Capitains/Nautilus/blob/6be453fe0cc0e2c1b89ff06e5af1409165fc1411/capitains_nautilus/apis/dts.py#L41-L53 | train | 53,685 |
PolyJIT/benchbuild | benchbuild/utils/db.py | create_run | def create_run(cmd, project, exp, grp):
"""
Create a new 'run' in the database.
This creates a new transaction in the database and creates a new
run in this transaction. Afterwards we return both the transaction as
well as the run itself. The user is responsible for committing it when
the time comes.
Args:
cmd: The command that has been executed.
prj: The project this run belongs to.
exp: The experiment this run belongs to.
grp: The run_group (uuid) we blong to.
Returns:
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point.
"""
from benchbuild.utils import schema as s
session = s.Session()
run = s.Run(
command=str(cmd),
project_name=project.name,
project_group=project.group,
experiment_name=exp,
run_group=str(grp),
experiment_group=project.experiment.id)
session.add(run)
session.commit()
return (run, session) | python | def create_run(cmd, project, exp, grp):
"""
Create a new 'run' in the database.
This creates a new transaction in the database and creates a new
run in this transaction. Afterwards we return both the transaction as
well as the run itself. The user is responsible for committing it when
the time comes.
Args:
cmd: The command that has been executed.
prj: The project this run belongs to.
exp: The experiment this run belongs to.
grp: The run_group (uuid) we blong to.
Returns:
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point.
"""
from benchbuild.utils import schema as s
session = s.Session()
run = s.Run(
command=str(cmd),
project_name=project.name,
project_group=project.group,
experiment_name=exp,
run_group=str(grp),
experiment_group=project.experiment.id)
session.add(run)
session.commit()
return (run, session) | [
"def",
"create_run",
"(",
"cmd",
",",
"project",
",",
"exp",
",",
"grp",
")",
":",
"from",
"benchbuild",
".",
"utils",
"import",
"schema",
"as",
"s",
"session",
"=",
"s",
".",
"Session",
"(",
")",
"run",
"=",
"s",
".",
"Run",
"(",
"command",
"=",
... | Create a new 'run' in the database.
This creates a new transaction in the database and creates a new
run in this transaction. Afterwards we return both the transaction as
well as the run itself. The user is responsible for committing it when
the time comes.
Args:
cmd: The command that has been executed.
prj: The project this run belongs to.
exp: The experiment this run belongs to.
grp: The run_group (uuid) we blong to.
Returns:
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point. | [
"Create",
"a",
"new",
"run",
"in",
"the",
"database",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L23-L55 | train | 53,686 |
PolyJIT/benchbuild | benchbuild/utils/db.py | create_run_group | def create_run_group(prj):
"""
Create a new 'run_group' in the database.
This creates a new transaction in the database and creates a new run_group
within this transaction. Afterwards we return both the transaction as well
as the run_group itself. The user is responsible for committing it when the
time comes.
Args:
prj - The project for which we open the run_group.
Returns:
A tuple (group, session) containing both the newly created run_group and
the transaction object.
"""
from benchbuild.utils import schema as s
session = s.Session()
experiment = prj.experiment
group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)
session.add(group)
session.commit()
return (group, session) | python | def create_run_group(prj):
"""
Create a new 'run_group' in the database.
This creates a new transaction in the database and creates a new run_group
within this transaction. Afterwards we return both the transaction as well
as the run_group itself. The user is responsible for committing it when the
time comes.
Args:
prj - The project for which we open the run_group.
Returns:
A tuple (group, session) containing both the newly created run_group and
the transaction object.
"""
from benchbuild.utils import schema as s
session = s.Session()
experiment = prj.experiment
group = s.RunGroup(id=prj.run_uuid, experiment=experiment.id)
session.add(group)
session.commit()
return (group, session) | [
"def",
"create_run_group",
"(",
"prj",
")",
":",
"from",
"benchbuild",
".",
"utils",
"import",
"schema",
"as",
"s",
"session",
"=",
"s",
".",
"Session",
"(",
")",
"experiment",
"=",
"prj",
".",
"experiment",
"group",
"=",
"s",
".",
"RunGroup",
"(",
"id... | Create a new 'run_group' in the database.
This creates a new transaction in the database and creates a new run_group
within this transaction. Afterwards we return both the transaction as well
as the run_group itself. The user is responsible for committing it when the
time comes.
Args:
prj - The project for which we open the run_group.
Returns:
A tuple (group, session) containing both the newly created run_group and
the transaction object. | [
"Create",
"a",
"new",
"run_group",
"in",
"the",
"database",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L58-L82 | train | 53,687 |
PolyJIT/benchbuild | benchbuild/utils/db.py | persist_project | def persist_project(project):
"""
Persist this project in the benchbuild database.
Args:
project: The project we want to persist.
"""
from benchbuild.utils.schema import Project, Session
session = Session()
projects = session.query(Project) \
.filter(Project.name == project.name) \
.filter(Project.group_name == project.group)
name = project.name
desc = project.__doc__
domain = project.domain
group_name = project.group
version = project.version() \
if callable(project.version) else project.version
try:
src_url = project.src_uri
except AttributeError:
src_url = 'unknown'
if projects.count() == 0:
newp = Project()
newp.name = name
newp.description = desc
newp.src_url = src_url
newp.domain = domain
newp.group_name = group_name
newp.version = version
session.add(newp)
else:
newp_value = {
"name": name,
"description": desc,
"src_url": src_url,
"domain": domain,
"group_name": group_name,
"version": version
}
projects.update(newp_value)
session.commit()
return (projects, session) | python | def persist_project(project):
"""
Persist this project in the benchbuild database.
Args:
project: The project we want to persist.
"""
from benchbuild.utils.schema import Project, Session
session = Session()
projects = session.query(Project) \
.filter(Project.name == project.name) \
.filter(Project.group_name == project.group)
name = project.name
desc = project.__doc__
domain = project.domain
group_name = project.group
version = project.version() \
if callable(project.version) else project.version
try:
src_url = project.src_uri
except AttributeError:
src_url = 'unknown'
if projects.count() == 0:
newp = Project()
newp.name = name
newp.description = desc
newp.src_url = src_url
newp.domain = domain
newp.group_name = group_name
newp.version = version
session.add(newp)
else:
newp_value = {
"name": name,
"description": desc,
"src_url": src_url,
"domain": domain,
"group_name": group_name,
"version": version
}
projects.update(newp_value)
session.commit()
return (projects, session) | [
"def",
"persist_project",
"(",
"project",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"schema",
"import",
"Project",
",",
"Session",
"session",
"=",
"Session",
"(",
")",
"projects",
"=",
"session",
".",
"query",
"(",
"Project",
")",
".",
"filter",
... | Persist this project in the benchbuild database.
Args:
project: The project we want to persist. | [
"Persist",
"this",
"project",
"in",
"the",
"benchbuild",
"database",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L85-L130 | train | 53,688 |
PolyJIT/benchbuild | benchbuild/utils/db.py | persist_experiment | def persist_experiment(experiment):
"""
Persist this experiment in the benchbuild database.
Args:
experiment: The experiment we want to persist.
"""
from benchbuild.utils.schema import Experiment, Session
session = Session()
cfg_exp = experiment.id
LOG.debug("Using experiment ID stored in config: %s", cfg_exp)
exps = session.query(Experiment).filter(Experiment.id == cfg_exp)
desc = str(CFG["experiment_description"])
name = experiment.name
if exps.count() == 0:
newe = Experiment()
newe.id = cfg_exp
newe.name = name
newe.description = desc
session.add(newe)
ret = newe
else:
exps.update({'name': name, 'description': desc})
ret = exps.first()
try:
session.commit()
except IntegrityError:
session.rollback()
persist_experiment(experiment)
return (ret, session) | python | def persist_experiment(experiment):
"""
Persist this experiment in the benchbuild database.
Args:
experiment: The experiment we want to persist.
"""
from benchbuild.utils.schema import Experiment, Session
session = Session()
cfg_exp = experiment.id
LOG.debug("Using experiment ID stored in config: %s", cfg_exp)
exps = session.query(Experiment).filter(Experiment.id == cfg_exp)
desc = str(CFG["experiment_description"])
name = experiment.name
if exps.count() == 0:
newe = Experiment()
newe.id = cfg_exp
newe.name = name
newe.description = desc
session.add(newe)
ret = newe
else:
exps.update({'name': name, 'description': desc})
ret = exps.first()
try:
session.commit()
except IntegrityError:
session.rollback()
persist_experiment(experiment)
return (ret, session) | [
"def",
"persist_experiment",
"(",
"experiment",
")",
":",
"from",
"benchbuild",
".",
"utils",
".",
"schema",
"import",
"Experiment",
",",
"Session",
"session",
"=",
"Session",
"(",
")",
"cfg_exp",
"=",
"experiment",
".",
"id",
"LOG",
".",
"debug",
"(",
"\"... | Persist this experiment in the benchbuild database.
Args:
experiment: The experiment we want to persist. | [
"Persist",
"this",
"experiment",
"in",
"the",
"benchbuild",
"database",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L133-L167 | train | 53,689 |
PolyJIT/benchbuild | benchbuild/utils/db.py | persist_perf | def persist_perf(run, session, svg_path):
"""
Persist the flamegraph in the database.
The flamegraph exists as a SVG image on disk until we persist it in the
database.
Args:
run: The run we attach these perf measurements to.
session: The db transaction we belong to.
svg_path: The path to the SVG file we want to store.
"""
from benchbuild.utils import schema as s
with open(svg_path, 'r') as svg_file:
svg_data = svg_file.read()
session.add(
s.Metadata(name="perf.flamegraph", value=svg_data, run_id=run.id)) | python | def persist_perf(run, session, svg_path):
"""
Persist the flamegraph in the database.
The flamegraph exists as a SVG image on disk until we persist it in the
database.
Args:
run: The run we attach these perf measurements to.
session: The db transaction we belong to.
svg_path: The path to the SVG file we want to store.
"""
from benchbuild.utils import schema as s
with open(svg_path, 'r') as svg_file:
svg_data = svg_file.read()
session.add(
s.Metadata(name="perf.flamegraph", value=svg_data, run_id=run.id)) | [
"def",
"persist_perf",
"(",
"run",
",",
"session",
",",
"svg_path",
")",
":",
"from",
"benchbuild",
".",
"utils",
"import",
"schema",
"as",
"s",
"with",
"open",
"(",
"svg_path",
",",
"'r'",
")",
"as",
"svg_file",
":",
"svg_data",
"=",
"svg_file",
".",
... | Persist the flamegraph in the database.
The flamegraph exists as a SVG image on disk until we persist it in the
database.
Args:
run: The run we attach these perf measurements to.
session: The db transaction we belong to.
svg_path: The path to the SVG file we want to store. | [
"Persist",
"the",
"flamegraph",
"in",
"the",
"database",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L191-L208 | train | 53,690 |
PolyJIT/benchbuild | benchbuild/utils/db.py | persist_config | def persist_config(run, session, cfg):
"""
Persist the configuration in as key-value pairs.
Args:
run: The run we attach the config to.
session: The db transaction we belong to.
cfg: The configuration we want to persist.
"""
from benchbuild.utils import schema as s
for cfg_elem in cfg:
session.add(
s.Config(name=cfg_elem, value=cfg[cfg_elem], run_id=run.id)) | python | def persist_config(run, session, cfg):
"""
Persist the configuration in as key-value pairs.
Args:
run: The run we attach the config to.
session: The db transaction we belong to.
cfg: The configuration we want to persist.
"""
from benchbuild.utils import schema as s
for cfg_elem in cfg:
session.add(
s.Config(name=cfg_elem, value=cfg[cfg_elem], run_id=run.id)) | [
"def",
"persist_config",
"(",
"run",
",",
"session",
",",
"cfg",
")",
":",
"from",
"benchbuild",
".",
"utils",
"import",
"schema",
"as",
"s",
"for",
"cfg_elem",
"in",
"cfg",
":",
"session",
".",
"add",
"(",
"s",
".",
"Config",
"(",
"name",
"=",
"cfg_... | Persist the configuration in as key-value pairs.
Args:
run: The run we attach the config to.
session: The db transaction we belong to.
cfg: The configuration we want to persist. | [
"Persist",
"the",
"configuration",
"in",
"as",
"key",
"-",
"value",
"pairs",
"."
] | 9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58 | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/db.py#L225-L238 | train | 53,691 |
BlueBrain/hpcbench | hpcbench/benchmark/ior.py | IOR.apis | def apis(self):
"""List of API to test"""
value = self.attributes['apis']
if isinstance(value, six.string_types):
value = shlex.split(value)
return value | python | def apis(self):
"""List of API to test"""
value = self.attributes['apis']
if isinstance(value, six.string_types):
value = shlex.split(value)
return value | [
"def",
"apis",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"attributes",
"[",
"'apis'",
"]",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"value",
"=",
"shlex",
".",
"split",
"(",
"value",
")",
"return",
"value"
... | List of API to test | [
"List",
"of",
"API",
"to",
"test"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/ior.py#L220-L225 | train | 53,692 |
BlueBrain/hpcbench | hpcbench/benchmark/ior.py | IOR.pre_execute | def pre_execute(self, execution, context):
"""Make sure the named directory is created if possible"""
path = self._fspath
if path:
path = path.format(
benchmark=context.benchmark,
api=execution['category'],
**execution.get('metas', {})
)
if self.clean_path:
shutil.rmtree(path, ignore_errors=True)
if execution['metas']['file_mode'] == 'onefile':
path = osp.dirname(path)
if not osp.exists(path):
os.makedirs(path) | python | def pre_execute(self, execution, context):
"""Make sure the named directory is created if possible"""
path = self._fspath
if path:
path = path.format(
benchmark=context.benchmark,
api=execution['category'],
**execution.get('metas', {})
)
if self.clean_path:
shutil.rmtree(path, ignore_errors=True)
if execution['metas']['file_mode'] == 'onefile':
path = osp.dirname(path)
if not osp.exists(path):
os.makedirs(path) | [
"def",
"pre_execute",
"(",
"self",
",",
"execution",
",",
"context",
")",
":",
"path",
"=",
"self",
".",
"_fspath",
"if",
"path",
":",
"path",
"=",
"path",
".",
"format",
"(",
"benchmark",
"=",
"context",
".",
"benchmark",
",",
"api",
"=",
"execution",... | Make sure the named directory is created if possible | [
"Make",
"sure",
"the",
"named",
"directory",
"is",
"created",
"if",
"possible"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/ior.py#L227-L241 | train | 53,693 |
BlueBrain/hpcbench | hpcbench/benchmark/ior.py | IOR.file_mode | def file_mode(self):
"""onefile, fpp, or both"""
fms = self.attributes['file_mode']
eax = set()
if isinstance(fms, six.string_types):
fms = shlex.split(fms)
for fm in fms:
if fm == 'both':
eax.add('fpp')
eax.add('onefile')
elif fm in ['fpp', 'onefile']:
eax.add(fm)
else:
raise Exception('Invalid IOR file mode: ' + fm)
return eax | python | def file_mode(self):
"""onefile, fpp, or both"""
fms = self.attributes['file_mode']
eax = set()
if isinstance(fms, six.string_types):
fms = shlex.split(fms)
for fm in fms:
if fm == 'both':
eax.add('fpp')
eax.add('onefile')
elif fm in ['fpp', 'onefile']:
eax.add(fm)
else:
raise Exception('Invalid IOR file mode: ' + fm)
return eax | [
"def",
"file_mode",
"(",
"self",
")",
":",
"fms",
"=",
"self",
".",
"attributes",
"[",
"'file_mode'",
"]",
"eax",
"=",
"set",
"(",
")",
"if",
"isinstance",
"(",
"fms",
",",
"six",
".",
"string_types",
")",
":",
"fms",
"=",
"shlex",
".",
"split",
"(... | onefile, fpp, or both | [
"onefile",
"fpp",
"or",
"both"
] | 192d0ec142b897157ec25f131d1ef28f84752592 | https://github.com/BlueBrain/hpcbench/blob/192d0ec142b897157ec25f131d1ef28f84752592/hpcbench/benchmark/ior.py#L313-L327 | train | 53,694 |
OzymandiasTheGreat/python-libinput | libinput/event.py | Event.device | def device(self):
"""The device associated with this event.
For device added/removed events this is the device added or removed.
For all other device events, this is the device that generated the
event.
Returns:
~libinput.define.Device: Device object.
"""
hdevice = self._libinput.libinput_event_get_device(self._hevent)
return Device(hdevice, self._libinput) | python | def device(self):
"""The device associated with this event.
For device added/removed events this is the device added or removed.
For all other device events, this is the device that generated the
event.
Returns:
~libinput.define.Device: Device object.
"""
hdevice = self._libinput.libinput_event_get_device(self._hevent)
return Device(hdevice, self._libinput) | [
"def",
"device",
"(",
"self",
")",
":",
"hdevice",
"=",
"self",
".",
"_libinput",
".",
"libinput_event_get_device",
"(",
"self",
".",
"_hevent",
")",
"return",
"Device",
"(",
"hdevice",
",",
"self",
".",
"_libinput",
")"
] | The device associated with this event.
For device added/removed events this is the device added or removed.
For all other device events, this is the device that generated the
event.
Returns:
~libinput.define.Device: Device object. | [
"The",
"device",
"associated",
"with",
"this",
"event",
"."
] | 1f477ee9f1d56b284b20e0317ea8967c64ef1218 | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L53-L65 | train | 53,695 |
OzymandiasTheGreat/python-libinput | libinput/event.py | PointerEvent.absolute_coords | def absolute_coords(self):
"""The current absolute coordinates of the pointer event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinate, use
:meth:`transform_absolute_coords`.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,
this property raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute coordinates.
Raises:
AttributeError
"""
if self.type != EventType.POINTER_MOTION_ABSOLUTE:
raise AttributeError(_wrong_prop.format(self.type))
abs_x = self._libinput.libinput_event_pointer_get_absolute_x(
self._handle)
abs_y = self._libinput.libinput_event_pointer_get_absolute_y(
self._handle)
return abs_x, abs_y | python | def absolute_coords(self):
"""The current absolute coordinates of the pointer event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinate, use
:meth:`transform_absolute_coords`.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,
this property raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute coordinates.
Raises:
AttributeError
"""
if self.type != EventType.POINTER_MOTION_ABSOLUTE:
raise AttributeError(_wrong_prop.format(self.type))
abs_x = self._libinput.libinput_event_pointer_get_absolute_x(
self._handle)
abs_y = self._libinput.libinput_event_pointer_get_absolute_y(
self._handle)
return abs_x, abs_y | [
"def",
"absolute_coords",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"POINTER_MOTION_ABSOLUTE",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"abs_x",
"=",
"self",
".",... | The current absolute coordinates of the pointer event,
in mm from the top left corner of the device.
To get the corresponding output screen coordinate, use
:meth:`transform_absolute_coords`.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,
this property raises :exc:`AttributeError`.
Returns:
(float, float): The current absolute coordinates.
Raises:
AttributeError | [
"The",
"current",
"absolute",
"coordinates",
"of",
"the",
"pointer",
"event",
"in",
"mm",
"from",
"the",
"top",
"left",
"corner",
"of",
"the",
"device",
"."
] | 1f477ee9f1d56b284b20e0317ea8967c64ef1218 | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L216-L239 | train | 53,696 |
OzymandiasTheGreat/python-libinput | libinput/event.py | PointerEvent.transform_absolute_coords | def transform_absolute_coords(self, width, height):
"""Return the current absolute coordinates of the pointer event,
transformed to screen coordinates.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,
this method raises :exc:`AttributeError`.
Args:
width (int): The current output screen width.
height (int): The current output screen height.
Returns:
(float, float): The current absolute (x, y) coordinates transformed
to a screen coordinates.
Raises:
AttributeError
"""
if self.type != EventType.POINTER_MOTION_ABSOLUTE:
raise AttributeError(_wrong_meth.format(self.type))
abs_x = self._libinput \
.libinput_event_pointer_get_absolute_x_transformed(
self._handle, width)
abs_y = self._libinput \
.libinput_event_pointer_get_absolute_y_transformed(
self._handle, height)
return abs_x, abs_y | python | def transform_absolute_coords(self, width, height):
"""Return the current absolute coordinates of the pointer event,
transformed to screen coordinates.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,
this method raises :exc:`AttributeError`.
Args:
width (int): The current output screen width.
height (int): The current output screen height.
Returns:
(float, float): The current absolute (x, y) coordinates transformed
to a screen coordinates.
Raises:
AttributeError
"""
if self.type != EventType.POINTER_MOTION_ABSOLUTE:
raise AttributeError(_wrong_meth.format(self.type))
abs_x = self._libinput \
.libinput_event_pointer_get_absolute_x_transformed(
self._handle, width)
abs_y = self._libinput \
.libinput_event_pointer_get_absolute_y_transformed(
self._handle, height)
return abs_x, abs_y | [
"def",
"transform_absolute_coords",
"(",
"self",
",",
"width",
",",
"height",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"POINTER_MOTION_ABSOLUTE",
":",
"raise",
"AttributeError",
"(",
"_wrong_meth",
".",
"format",
"(",
"self",
".",
"type",
... | Return the current absolute coordinates of the pointer event,
transformed to screen coordinates.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_MOTION_ABSOLUTE`,
this method raises :exc:`AttributeError`.
Args:
width (int): The current output screen width.
height (int): The current output screen height.
Returns:
(float, float): The current absolute (x, y) coordinates transformed
to a screen coordinates.
Raises:
AttributeError | [
"Return",
"the",
"current",
"absolute",
"coordinates",
"of",
"the",
"pointer",
"event",
"transformed",
"to",
"screen",
"coordinates",
"."
] | 1f477ee9f1d56b284b20e0317ea8967c64ef1218 | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L241-L267 | train | 53,697 |
OzymandiasTheGreat/python-libinput | libinput/event.py | PointerEvent.button_state | def button_state(self):
"""The button state that triggered this event.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.ButtonState: The button state triggering this
event.
Raises:
AttributeError
"""
if self.type != EventType.POINTER_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_pointer_get_button_state(
self._handle) | python | def button_state(self):
"""The button state that triggered this event.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.ButtonState: The button state triggering this
event.
Raises:
AttributeError
"""
if self.type != EventType.POINTER_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_pointer_get_button_state(
self._handle) | [
"def",
"button_state",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"POINTER_BUTTON",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"self",
".",
"_libinput",
... | The button state that triggered this event.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
raises :exc:`AttributeError`.
Returns:
~libinput.constant.ButtonState: The button state triggering this
event.
Raises:
AttributeError | [
"The",
"button",
"state",
"that",
"triggered",
"this",
"event",
"."
] | 1f477ee9f1d56b284b20e0317ea8967c64ef1218 | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L288-L305 | train | 53,698 |
OzymandiasTheGreat/python-libinput | libinput/event.py | PointerEvent.seat_button_count | def seat_button_count(self):
"""The total number of buttons pressed on all devices on the
associated seat after the event was triggered.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
raises :exc:`AssertionError`.
Returns:
int: The seat wide pressed button count for the key of this event.
Raises:
AssertionError
"""
if self.type != EventType.POINTER_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_pointer_get_seat_button_count(
self._handle) | python | def seat_button_count(self):
"""The total number of buttons pressed on all devices on the
associated seat after the event was triggered.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
raises :exc:`AssertionError`.
Returns:
int: The seat wide pressed button count for the key of this event.
Raises:
AssertionError
"""
if self.type != EventType.POINTER_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_pointer_get_seat_button_count(
self._handle) | [
"def",
"seat_button_count",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"EventType",
".",
"POINTER_BUTTON",
":",
"raise",
"AttributeError",
"(",
"_wrong_prop",
".",
"format",
"(",
"self",
".",
"type",
")",
")",
"return",
"self",
".",
"_libinput... | The total number of buttons pressed on all devices on the
associated seat after the event was triggered.
For pointer events that are not of type
:attr:`~libinput.constant.EventType.POINTER_BUTTON`, this property
raises :exc:`AssertionError`.
Returns:
int: The seat wide pressed button count for the key of this event.
Raises:
AssertionError | [
"The",
"total",
"number",
"of",
"buttons",
"pressed",
"on",
"all",
"devices",
"on",
"the",
"associated",
"seat",
"after",
"the",
"event",
"was",
"triggered",
"."
] | 1f477ee9f1d56b284b20e0317ea8967c64ef1218 | https://github.com/OzymandiasTheGreat/python-libinput/blob/1f477ee9f1d56b284b20e0317ea8967c64ef1218/libinput/event.py#L308-L325 | train | 53,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.