repo
stringlengths 7
54
| path
stringlengths 4
192
| url
stringlengths 87
284
| code
stringlengths 78
104k
| code_tokens
list | docstring
stringlengths 1
46.9k
| docstring_tokens
list | language
stringclasses 1
value | partition
stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
StackStorm/pybind
|
pybind/slxos/v17s_1_02/isis_state/interface_brief/isis_intf_brief/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/isis_state/interface_brief/isis_intf_brief/__init__.py#L147-L170
|
def _set_circuit_type(self, v, load=False):
"""
Setter method for circuit_type, mapped from YANG variable /isis_state/interface_brief/isis_intf_brief/circuit_type (isis-circ-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_circuit_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_circuit_type() directly.
YANG Description: Type of ISIS Circuit
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-circ-lan': {'value': 2}, u'is-circ-ptpt': {'value': 1}, u'is-circ-unknown': {'value': 0}},), is_leaf=True, yang_name="circuit-type", rest_name="circuit-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-circ-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """circuit_type must be of a type compatible with isis-circ-type""",
'defined-type': "brocade-isis-operational:isis-circ-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-circ-lan': {'value': 2}, u'is-circ-ptpt': {'value': 1}, u'is-circ-unknown': {'value': 0}},), is_leaf=True, yang_name="circuit-type", rest_name="circuit-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-circ-type', is_config=False)""",
})
self.__circuit_type = t
if hasattr(self, '_set'):
self._set()
|
[
"def",
"_set_circuit_type",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'is-circ-lan'",
":",
"{",
"'value'",
":",
"2",
"}",
",",
"u'is-circ-ptpt'",
":",
"{",
"'value'",
":",
"1",
"}",
",",
"u'is-circ-unknown'",
":",
"{",
"'value'",
":",
"0",
"}",
"}",
",",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"circuit-type\"",
",",
"rest_name",
"=",
"\"circuit-type\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-isis-operational'",
",",
"defining_module",
"=",
"'brocade-isis-operational'",
",",
"yang_type",
"=",
"'isis-circ-type'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"circuit_type must be of a type compatible with isis-circ-type\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-isis-operational:isis-circ-type\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-circ-lan': {'value': 2}, u'is-circ-ptpt': {'value': 1}, u'is-circ-unknown': {'value': 0}},), is_leaf=True, yang_name=\"circuit-type\", rest_name=\"circuit-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-circ-type', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__circuit_type",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for circuit_type, mapped from YANG variable /isis_state/interface_brief/isis_intf_brief/circuit_type (isis-circ-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_circuit_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_circuit_type() directly.
YANG Description: Type of ISIS Circuit
|
[
"Setter",
"method",
"for",
"circuit_type",
"mapped",
"from",
"YANG",
"variable",
"/",
"isis_state",
"/",
"interface_brief",
"/",
"isis_intf_brief",
"/",
"circuit_type",
"(",
"isis",
"-",
"circ",
"-",
"type",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_circuit_type",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_circuit_type",
"()",
"directly",
"."
] |
python
|
train
|
nalourie/django-macros
|
build/lib/macros/templatetags/macros.py
|
https://github.com/nalourie/django-macros/blob/94b836f5d4073bc0752491c36dfb03e79098ab27/build/lib/macros/templatetags/macros.py#L267-L279
|
def do_usemacro(parser, token):
""" The function taking a parsed template tag
and returning a UseMacroNode.
"""
tag_name, macro_name, args, kwargs = parse_macro_params(token)
try:
macro = parser._macros[macro_name]
except (AttributeError, KeyError):
raise template.TemplateSyntaxError(
"Macro '{0}' is not defined previously to the {1} tag".format(
macro_name, tag_name))
macro.parser = parser
return UseMacroNode(macro, args, kwargs)
|
[
"def",
"do_usemacro",
"(",
"parser",
",",
"token",
")",
":",
"tag_name",
",",
"macro_name",
",",
"args",
",",
"kwargs",
"=",
"parse_macro_params",
"(",
"token",
")",
"try",
":",
"macro",
"=",
"parser",
".",
"_macros",
"[",
"macro_name",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"raise",
"template",
".",
"TemplateSyntaxError",
"(",
"\"Macro '{0}' is not defined previously to the {1} tag\"",
".",
"format",
"(",
"macro_name",
",",
"tag_name",
")",
")",
"macro",
".",
"parser",
"=",
"parser",
"return",
"UseMacroNode",
"(",
"macro",
",",
"args",
",",
"kwargs",
")"
] |
The function taking a parsed template tag
and returning a UseMacroNode.
|
[
"The",
"function",
"taking",
"a",
"parsed",
"template",
"tag",
"and",
"returning",
"a",
"UseMacroNode",
"."
] |
python
|
train
|
contains-io/rcli
|
rcli/dispatcher.py
|
https://github.com/contains-io/rcli/blob/cdd6191a0e0a19bc767f84921650835d099349cf/rcli/dispatcher.py#L126-L144
|
def _get_parsed_args(command_name, doc, argv):
# type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any]
"""Parse the docstring with docopt.
Args:
command_name: The name of the subcommand to parse.
doc: A docopt-parseable string.
argv: The list of arguments to pass to docopt during parsing.
Returns:
The docopt results dictionary. If the subcommand has the same name as
the primary command, the subcommand value will be added to the
dictionary.
"""
_LOGGER.debug('Parsing docstring: """%s""" with arguments %s.', doc, argv)
args = docopt(doc, argv=argv)
if command_name == settings.command:
args[command_name] = True
return args
|
[
"def",
"_get_parsed_args",
"(",
"command_name",
",",
"doc",
",",
"argv",
")",
":",
"# type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any]",
"_LOGGER",
".",
"debug",
"(",
"'Parsing docstring: \"\"\"%s\"\"\" with arguments %s.'",
",",
"doc",
",",
"argv",
")",
"args",
"=",
"docopt",
"(",
"doc",
",",
"argv",
"=",
"argv",
")",
"if",
"command_name",
"==",
"settings",
".",
"command",
":",
"args",
"[",
"command_name",
"]",
"=",
"True",
"return",
"args"
] |
Parse the docstring with docopt.
Args:
command_name: The name of the subcommand to parse.
doc: A docopt-parseable string.
argv: The list of arguments to pass to docopt during parsing.
Returns:
The docopt results dictionary. If the subcommand has the same name as
the primary command, the subcommand value will be added to the
dictionary.
|
[
"Parse",
"the",
"docstring",
"with",
"docopt",
"."
] |
python
|
train
|
bmuller/kademlia
|
kademlia/network.py
|
https://github.com/bmuller/kademlia/blob/4a8d445c9ee8f3ca10f56107e4445daed4933c8a/kademlia/network.py#L102-L113
|
def bootstrappable_neighbors(self):
"""
Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for
use as an argument to the bootstrap method.
The server should have been bootstrapped
already - this is just a utility for getting some neighbors and then
storing them if this server is going down for a while. When it comes
back up, the list of nodes can be used to bootstrap.
"""
neighbors = self.protocol.router.find_neighbors(self.node)
return [tuple(n)[-2:] for n in neighbors]
|
[
"def",
"bootstrappable_neighbors",
"(",
"self",
")",
":",
"neighbors",
"=",
"self",
".",
"protocol",
".",
"router",
".",
"find_neighbors",
"(",
"self",
".",
"node",
")",
"return",
"[",
"tuple",
"(",
"n",
")",
"[",
"-",
"2",
":",
"]",
"for",
"n",
"in",
"neighbors",
"]"
] |
Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for
use as an argument to the bootstrap method.
The server should have been bootstrapped
already - this is just a utility for getting some neighbors and then
storing them if this server is going down for a while. When it comes
back up, the list of nodes can be used to bootstrap.
|
[
"Get",
"a",
":",
"class",
":",
"list",
"of",
"(",
"ip",
"port",
")",
":",
"class",
":",
"tuple",
"pairs",
"suitable",
"for",
"use",
"as",
"an",
"argument",
"to",
"the",
"bootstrap",
"method",
"."
] |
python
|
train
|
LEMS/pylems
|
lems/model/model.py
|
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/model.py#L328-L347
|
def resolve(self):
"""
Resolves references in this model.
"""
model = self.copy()
for ct in model.component_types:
model.resolve_component_type(ct)
for c in model.components:
if c.id not in model.fat_components:
model.add(model.fatten_component(c))
for c in ct.constants:
c2 = c.copy()
c2.numeric_value = model.get_numeric_value(c2.value, c2.dimension)
model.add(c2)
return model
|
[
"def",
"resolve",
"(",
"self",
")",
":",
"model",
"=",
"self",
".",
"copy",
"(",
")",
"for",
"ct",
"in",
"model",
".",
"component_types",
":",
"model",
".",
"resolve_component_type",
"(",
"ct",
")",
"for",
"c",
"in",
"model",
".",
"components",
":",
"if",
"c",
".",
"id",
"not",
"in",
"model",
".",
"fat_components",
":",
"model",
".",
"add",
"(",
"model",
".",
"fatten_component",
"(",
"c",
")",
")",
"for",
"c",
"in",
"ct",
".",
"constants",
":",
"c2",
"=",
"c",
".",
"copy",
"(",
")",
"c2",
".",
"numeric_value",
"=",
"model",
".",
"get_numeric_value",
"(",
"c2",
".",
"value",
",",
"c2",
".",
"dimension",
")",
"model",
".",
"add",
"(",
"c2",
")",
"return",
"model"
] |
Resolves references in this model.
|
[
"Resolves",
"references",
"in",
"this",
"model",
"."
] |
python
|
train
|
michaeljoseph/changes
|
changes/version.py
|
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/version.py#L59-L67
|
def increment_version(context):
"""Increments the __version__ attribute of your module's __init__."""
attributes.replace_attribute(
context.module_name, '__version__', context.new_version, dry_run=context.dry_run
)
log.info(
'Bumped version from %s to %s' % (context.current_version, context.new_version)
)
|
[
"def",
"increment_version",
"(",
"context",
")",
":",
"attributes",
".",
"replace_attribute",
"(",
"context",
".",
"module_name",
",",
"'__version__'",
",",
"context",
".",
"new_version",
",",
"dry_run",
"=",
"context",
".",
"dry_run",
")",
"log",
".",
"info",
"(",
"'Bumped version from %s to %s'",
"%",
"(",
"context",
".",
"current_version",
",",
"context",
".",
"new_version",
")",
")"
] |
Increments the __version__ attribute of your module's __init__.
|
[
"Increments",
"the",
"__version__",
"attribute",
"of",
"your",
"module",
"s",
"__init__",
"."
] |
python
|
train
|
bharadwaj-raju/libdesktop
|
libdesktop/system.py
|
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L56-L174
|
def get_name():
'''Get desktop environment or OS.
Get the OS name or desktop environment.
**List of Possible Values**
+-------------------------+---------------+
| Windows | windows |
+-------------------------+---------------+
| Mac OS X | mac |
+-------------------------+---------------+
| GNOME 3+ | gnome |
+-------------------------+---------------+
| GNOME 2 | gnome2 |
+-------------------------+---------------+
| XFCE | xfce4 |
+-------------------------+---------------+
| KDE | kde |
+-------------------------+---------------+
| Unity | unity |
+-------------------------+---------------+
| LXDE | lxde |
+-------------------------+---------------+
| i3wm | i3 |
+-------------------------+---------------+
| \*box | \*box |
+-------------------------+---------------+
| Trinity (KDE 3 fork) | trinity |
+-------------------------+---------------+
| MATE | mate |
+-------------------------+---------------+
| IceWM | icewm |
+-------------------------+---------------+
| Pantheon (elementaryOS) | pantheon |
+-------------------------+---------------+
| LXQt | lxqt |
+-------------------------+---------------+
| Awesome WM | awesome |
+-------------------------+---------------+
| Enlightenment | enlightenment |
+-------------------------+---------------+
| AfterStep | afterstep |
+-------------------------+---------------+
| WindowMaker | windowmaker |
+-------------------------+---------------+
| [Other] | unknown |
+-------------------------+---------------+
Returns:
str: The name of the desktop environment or OS.
'''
if sys.platform in ['win32', 'cygwin']:
return 'windows'
elif sys.platform == 'darwin':
return 'mac'
else:
desktop_session = os.environ.get(
'XDG_CURRENT_DESKTOP') or os.environ.get('DESKTOP_SESSION')
if desktop_session is not None:
desktop_session = desktop_session.lower()
# Fix for X-Cinnamon etc
if desktop_session.startswith('x-'):
desktop_session = desktop_session.replace('x-', '')
if desktop_session in ['gnome', 'unity', 'cinnamon', 'mate',
'xfce4', 'lxde', 'fluxbox',
'blackbox', 'openbox', 'icewm', 'jwm',
'afterstep', 'trinity', 'kde', 'pantheon',
'i3', 'lxqt', 'awesome', 'enlightenment']:
return desktop_session
#-- Special cases --#
# Canonical sets environment var to Lubuntu rather than
# LXDE if using LXDE.
# There is no guarantee that they will not do the same
# with the other desktop environments.
elif 'xfce' in desktop_session:
return 'xfce4'
elif desktop_session.startswith('ubuntu'):
return 'unity'
elif desktop_session.startswith('xubuntu'):
return 'xfce4'
elif desktop_session.startswith('lubuntu'):
return 'lxde'
elif desktop_session.startswith('kubuntu'):
return 'kde'
elif desktop_session.startswith('razor'):
return 'razor-qt'
elif desktop_session.startswith('wmaker'):
return 'windowmaker'
if os.environ.get('KDE_FULL_SESSION') == 'true':
return 'kde'
elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
if not 'deprecated' in os.environ.get('GNOME_DESKTOP_SESSION_ID'):
return 'gnome2'
elif is_running('xfce-mcs-manage'):
return 'xfce4'
elif is_running('ksmserver'):
return 'kde'
return 'unknown'
|
[
"def",
"get_name",
"(",
")",
":",
"if",
"sys",
".",
"platform",
"in",
"[",
"'win32'",
",",
"'cygwin'",
"]",
":",
"return",
"'windows'",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"return",
"'mac'",
"else",
":",
"desktop_session",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'XDG_CURRENT_DESKTOP'",
")",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"'DESKTOP_SESSION'",
")",
"if",
"desktop_session",
"is",
"not",
"None",
":",
"desktop_session",
"=",
"desktop_session",
".",
"lower",
"(",
")",
"# Fix for X-Cinnamon etc",
"if",
"desktop_session",
".",
"startswith",
"(",
"'x-'",
")",
":",
"desktop_session",
"=",
"desktop_session",
".",
"replace",
"(",
"'x-'",
",",
"''",
")",
"if",
"desktop_session",
"in",
"[",
"'gnome'",
",",
"'unity'",
",",
"'cinnamon'",
",",
"'mate'",
",",
"'xfce4'",
",",
"'lxde'",
",",
"'fluxbox'",
",",
"'blackbox'",
",",
"'openbox'",
",",
"'icewm'",
",",
"'jwm'",
",",
"'afterstep'",
",",
"'trinity'",
",",
"'kde'",
",",
"'pantheon'",
",",
"'i3'",
",",
"'lxqt'",
",",
"'awesome'",
",",
"'enlightenment'",
"]",
":",
"return",
"desktop_session",
"#-- Special cases --#",
"# Canonical sets environment var to Lubuntu rather than",
"# LXDE if using LXDE.",
"# There is no guarantee that they will not do the same",
"# with the other desktop environments.",
"elif",
"'xfce'",
"in",
"desktop_session",
":",
"return",
"'xfce4'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'ubuntu'",
")",
":",
"return",
"'unity'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'xubuntu'",
")",
":",
"return",
"'xfce4'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'lubuntu'",
")",
":",
"return",
"'lxde'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'kubuntu'",
")",
":",
"return",
"'kde'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'razor'",
")",
":",
"return",
"'razor-qt'",
"elif",
"desktop_session",
".",
"startswith",
"(",
"'wmaker'",
")",
":",
"return",
"'windowmaker'",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'KDE_FULL_SESSION'",
")",
"==",
"'true'",
":",
"return",
"'kde'",
"elif",
"os",
".",
"environ",
".",
"get",
"(",
"'GNOME_DESKTOP_SESSION_ID'",
")",
":",
"if",
"not",
"'deprecated'",
"in",
"os",
".",
"environ",
".",
"get",
"(",
"'GNOME_DESKTOP_SESSION_ID'",
")",
":",
"return",
"'gnome2'",
"elif",
"is_running",
"(",
"'xfce-mcs-manage'",
")",
":",
"return",
"'xfce4'",
"elif",
"is_running",
"(",
"'ksmserver'",
")",
":",
"return",
"'kde'",
"return",
"'unknown'"
] |
Get desktop environment or OS.
Get the OS name or desktop environment.
**List of Possible Values**
+-------------------------+---------------+
| Windows | windows |
+-------------------------+---------------+
| Mac OS X | mac |
+-------------------------+---------------+
| GNOME 3+ | gnome |
+-------------------------+---------------+
| GNOME 2 | gnome2 |
+-------------------------+---------------+
| XFCE | xfce4 |
+-------------------------+---------------+
| KDE | kde |
+-------------------------+---------------+
| Unity | unity |
+-------------------------+---------------+
| LXDE | lxde |
+-------------------------+---------------+
| i3wm | i3 |
+-------------------------+---------------+
| \*box | \*box |
+-------------------------+---------------+
| Trinity (KDE 3 fork) | trinity |
+-------------------------+---------------+
| MATE | mate |
+-------------------------+---------------+
| IceWM | icewm |
+-------------------------+---------------+
| Pantheon (elementaryOS) | pantheon |
+-------------------------+---------------+
| LXQt | lxqt |
+-------------------------+---------------+
| Awesome WM | awesome |
+-------------------------+---------------+
| Enlightenment | enlightenment |
+-------------------------+---------------+
| AfterStep | afterstep |
+-------------------------+---------------+
| WindowMaker | windowmaker |
+-------------------------+---------------+
| [Other] | unknown |
+-------------------------+---------------+
Returns:
str: The name of the desktop environment or OS.
|
[
"Get",
"desktop",
"environment",
"or",
"OS",
"."
] |
python
|
train
|
pybel/pybel-tools
|
src/pybel_tools/filters/node_deletion.py
|
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/filters/node_deletion.py#L54-L60
|
def remove_nodes_by_function_namespace(graph: BELGraph, func: str, namespace: Strings) -> None:
"""Remove nodes with the given function and namespace.
This might be useful to exclude information learned about distant species, such as excluding all information
from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.
"""
remove_filtered_nodes(graph, function_namespace_inclusion_builder(func, namespace))
|
[
"def",
"remove_nodes_by_function_namespace",
"(",
"graph",
":",
"BELGraph",
",",
"func",
":",
"str",
",",
"namespace",
":",
"Strings",
")",
"->",
"None",
":",
"remove_filtered_nodes",
"(",
"graph",
",",
"function_namespace_inclusion_builder",
"(",
"func",
",",
"namespace",
")",
")"
] |
Remove nodes with the given function and namespace.
This might be useful to exclude information learned about distant species, such as excluding all information
from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.
|
[
"Remove",
"nodes",
"with",
"the",
"given",
"function",
"and",
"namespace",
"."
] |
python
|
valid
|
vtkiorg/vtki
|
vtki/renderer.py
|
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L774-L791
|
def _remove_mapper_from_plotter(plotter, actor, reset_camera):
"""removes this actor's mapper from the given plotter's _scalar_bar_mappers"""
try:
mapper = actor.GetMapper()
except AttributeError:
return
for name in list(plotter._scalar_bar_mappers.keys()):
try:
plotter._scalar_bar_mappers[name].remove(mapper)
except ValueError:
pass
if len(plotter._scalar_bar_mappers[name]) < 1:
slot = plotter._scalar_bar_slot_lookup.pop(name)
plotter._scalar_bar_mappers.pop(name)
plotter._scalar_bar_ranges.pop(name)
plotter.remove_actor(plotter._scalar_bar_actors.pop(name), reset_camera=reset_camera)
plotter._scalar_bar_slots.add(slot)
return
|
[
"def",
"_remove_mapper_from_plotter",
"(",
"plotter",
",",
"actor",
",",
"reset_camera",
")",
":",
"try",
":",
"mapper",
"=",
"actor",
".",
"GetMapper",
"(",
")",
"except",
"AttributeError",
":",
"return",
"for",
"name",
"in",
"list",
"(",
"plotter",
".",
"_scalar_bar_mappers",
".",
"keys",
"(",
")",
")",
":",
"try",
":",
"plotter",
".",
"_scalar_bar_mappers",
"[",
"name",
"]",
".",
"remove",
"(",
"mapper",
")",
"except",
"ValueError",
":",
"pass",
"if",
"len",
"(",
"plotter",
".",
"_scalar_bar_mappers",
"[",
"name",
"]",
")",
"<",
"1",
":",
"slot",
"=",
"plotter",
".",
"_scalar_bar_slot_lookup",
".",
"pop",
"(",
"name",
")",
"plotter",
".",
"_scalar_bar_mappers",
".",
"pop",
"(",
"name",
")",
"plotter",
".",
"_scalar_bar_ranges",
".",
"pop",
"(",
"name",
")",
"plotter",
".",
"remove_actor",
"(",
"plotter",
".",
"_scalar_bar_actors",
".",
"pop",
"(",
"name",
")",
",",
"reset_camera",
"=",
"reset_camera",
")",
"plotter",
".",
"_scalar_bar_slots",
".",
"add",
"(",
"slot",
")",
"return"
] |
removes this actor's mapper from the given plotter's _scalar_bar_mappers
|
[
"removes",
"this",
"actor",
"s",
"mapper",
"from",
"the",
"given",
"plotter",
"s",
"_scalar_bar_mappers"
] |
python
|
train
|
allenai/allennlp
|
allennlp/semparse/contexts/table_question_context.py
|
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/table_question_context.py#L342-L357
|
def _string_in_table(self, candidate: str) -> List[str]:
"""
Checks if the string occurs in the table, and if it does, returns the names of the columns
under which it occurs. If it does not, returns an empty list.
"""
candidate_column_names: List[str] = []
# First check if the entire candidate occurs as a cell.
if candidate in self._string_column_mapping:
candidate_column_names = self._string_column_mapping[candidate]
# If not, check if it is a substring pf any cell value.
if not candidate_column_names:
for cell_value, column_names in self._string_column_mapping.items():
if candidate in cell_value:
candidate_column_names.extend(column_names)
candidate_column_names = list(set(candidate_column_names))
return candidate_column_names
|
[
"def",
"_string_in_table",
"(",
"self",
",",
"candidate",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"candidate_column_names",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"# First check if the entire candidate occurs as a cell.",
"if",
"candidate",
"in",
"self",
".",
"_string_column_mapping",
":",
"candidate_column_names",
"=",
"self",
".",
"_string_column_mapping",
"[",
"candidate",
"]",
"# If not, check if it is a substring pf any cell value.",
"if",
"not",
"candidate_column_names",
":",
"for",
"cell_value",
",",
"column_names",
"in",
"self",
".",
"_string_column_mapping",
".",
"items",
"(",
")",
":",
"if",
"candidate",
"in",
"cell_value",
":",
"candidate_column_names",
".",
"extend",
"(",
"column_names",
")",
"candidate_column_names",
"=",
"list",
"(",
"set",
"(",
"candidate_column_names",
")",
")",
"return",
"candidate_column_names"
] |
Checks if the string occurs in the table, and if it does, returns the names of the columns
under which it occurs. If it does not, returns an empty list.
|
[
"Checks",
"if",
"the",
"string",
"occurs",
"in",
"the",
"table",
"and",
"if",
"it",
"does",
"returns",
"the",
"names",
"of",
"the",
"columns",
"under",
"which",
"it",
"occurs",
".",
"If",
"it",
"does",
"not",
"returns",
"an",
"empty",
"list",
"."
] |
python
|
train
|
tradenity/python-sdk
|
tradenity/resources/discount_coupon.py
|
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/discount_coupon.py#L634-L654
|
def get_discount_coupon_by_id(cls, discount_coupon_id, **kwargs):
"""Find DiscountCoupon
Return single instance of DiscountCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_discount_coupon_by_id(discount_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to return (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_discount_coupon_by_id_with_http_info(discount_coupon_id, **kwargs)
else:
(data) = cls._get_discount_coupon_by_id_with_http_info(discount_coupon_id, **kwargs)
return data
|
[
"def",
"get_discount_coupon_by_id",
"(",
"cls",
",",
"discount_coupon_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_get_discount_coupon_by_id_with_http_info",
"(",
"discount_coupon_id",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_get_discount_coupon_by_id_with_http_info",
"(",
"discount_coupon_id",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] |
Find DiscountCoupon
Return single instance of DiscountCoupon by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_discount_coupon_by_id(discount_coupon_id, async=True)
>>> result = thread.get()
:param async bool
:param str discount_coupon_id: ID of discountCoupon to return (required)
:return: DiscountCoupon
If the method is called asynchronously,
returns the request thread.
|
[
"Find",
"DiscountCoupon"
] |
python
|
train
|
titusjan/argos
|
argos/collect/collector.py
|
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/collect/collector.py#L159-L167
|
def _setColumnCountForContents(self):
""" Sets the column count given the current axes and selected RTI.
Returns the newly set column count.
"""
numRtiDims = self.rti.nDims if self.rti and self.rti.isSliceable else 0
colCount = self.COL_FIRST_COMBO + max(numRtiDims, len(self.axisNames))
self.tree.model().setColumnCount(colCount)
return colCount
|
[
"def",
"_setColumnCountForContents",
"(",
"self",
")",
":",
"numRtiDims",
"=",
"self",
".",
"rti",
".",
"nDims",
"if",
"self",
".",
"rti",
"and",
"self",
".",
"rti",
".",
"isSliceable",
"else",
"0",
"colCount",
"=",
"self",
".",
"COL_FIRST_COMBO",
"+",
"max",
"(",
"numRtiDims",
",",
"len",
"(",
"self",
".",
"axisNames",
")",
")",
"self",
".",
"tree",
".",
"model",
"(",
")",
".",
"setColumnCount",
"(",
"colCount",
")",
"return",
"colCount"
] |
Sets the column count given the current axes and selected RTI.
Returns the newly set column count.
|
[
"Sets",
"the",
"column",
"count",
"given",
"the",
"current",
"axes",
"and",
"selected",
"RTI",
".",
"Returns",
"the",
"newly",
"set",
"column",
"count",
"."
] |
python
|
train
|
dnephin/PyStaticConfiguration
|
staticconf/config.py
|
https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L276-L286
|
def has_duplicate_keys(config_data, base_conf, raise_error):
"""Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True."""
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
|
[
"def",
"has_duplicate_keys",
"(",
"config_data",
",",
"base_conf",
",",
"raise_error",
")",
":",
"duplicate_keys",
"=",
"set",
"(",
"base_conf",
")",
"&",
"set",
"(",
"config_data",
")",
"if",
"not",
"duplicate_keys",
":",
"return",
"msg",
"=",
"\"Duplicate keys in config: %s\"",
"%",
"duplicate_keys",
"if",
"raise_error",
":",
"raise",
"errors",
".",
"ConfigurationError",
"(",
"msg",
")",
"log",
".",
"info",
"(",
"msg",
")",
"return",
"True"
] |
Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True.
|
[
"Compare",
"two",
"dictionaries",
"for",
"duplicate",
"keys",
".",
"if",
"raise_error",
"is",
"True",
"then",
"raise",
"on",
"exception",
"otherwise",
"log",
"return",
"True",
"."
] |
python
|
train
|
materialsproject/pymatgen
|
pymatgen/io/vasp/inputs.py
|
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L1022-L1037
|
def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Monkhorst, kpts=[kpts],
kpts_shift=shift)
|
[
"def",
"monkhorst_automatic",
"(",
"kpts",
"=",
"(",
"2",
",",
"2",
",",
"2",
")",
",",
"shift",
"=",
"(",
"0",
",",
"0",
",",
"0",
")",
")",
":",
"return",
"Kpoints",
"(",
"\"Automatic kpoint scheme\"",
",",
"0",
",",
"Kpoints",
".",
"supported_modes",
".",
"Monkhorst",
",",
"kpts",
"=",
"[",
"kpts",
"]",
",",
"kpts_shift",
"=",
"shift",
")"
] |
Convenient static constructor for an automatic Monkhorst pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
|
[
"Convenient",
"static",
"constructor",
"for",
"an",
"automatic",
"Monkhorst",
"pack",
"Kpoint",
"grid",
"."
] |
python
|
train
|
pypa/pipenv
|
pipenv/vendor/attr/_make.py
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/attr/_make.py#L711-L727
|
def _add_method_dunders(self, method):
"""
Add __module__ and __qualname__ to a *method* if possible.
"""
try:
method.__module__ = self._cls.__module__
except AttributeError:
pass
try:
method.__qualname__ = ".".join(
(self._cls.__qualname__, method.__name__)
)
except AttributeError:
pass
return method
|
[
"def",
"_add_method_dunders",
"(",
"self",
",",
"method",
")",
":",
"try",
":",
"method",
".",
"__module__",
"=",
"self",
".",
"_cls",
".",
"__module__",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"method",
".",
"__qualname__",
"=",
"\".\"",
".",
"join",
"(",
"(",
"self",
".",
"_cls",
".",
"__qualname__",
",",
"method",
".",
"__name__",
")",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"method"
] |
Add __module__ and __qualname__ to a *method* if possible.
|
[
"Add",
"__module__",
"and",
"__qualname__",
"to",
"a",
"*",
"method",
"*",
"if",
"possible",
"."
] |
python
|
train
|
bitesofcode/projexui
|
projexui/widgets/xnodewidget/xnode.py
|
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L1718-L1731
|
def setDisabledPenColor(self, color):
"""
Sets the pen color to be used when drawing this node as disabled.
:param color | <QColor>
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.Disabled,
self._palette.NodeForeground,
color)
self.setDirty()
|
[
"def",
"setDisabledPenColor",
"(",
"self",
",",
"color",
")",
":",
"color",
"=",
"QColor",
"(",
"color",
")",
"if",
"self",
".",
"_palette",
"is",
"None",
":",
"self",
".",
"_palette",
"=",
"XNodePalette",
"(",
"self",
".",
"_scenePalette",
")",
"self",
".",
"_palette",
".",
"setColor",
"(",
"self",
".",
"_palette",
".",
"Disabled",
",",
"self",
".",
"_palette",
".",
"NodeForeground",
",",
"color",
")",
"self",
".",
"setDirty",
"(",
")"
] |
Sets the pen color to be used when drawing this node as disabled.
:param color | <QColor>
|
[
"Sets",
"the",
"pen",
"color",
"to",
"be",
"used",
"when",
"drawing",
"this",
"node",
"as",
"disabled",
".",
":",
"param",
"color",
"|",
"<QColor",
">"
] |
python
|
train
|
vilmibm/done
|
parsedatetime/parsedatetime.py
|
https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L872-L943
|
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
"""
Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to C{sourceTime}
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime
"""
offset = self.ptc.Modifiers[modifier]
digit = r'\d+'
self.modifier2Flag = False
# If the string after the negative modifier starts with digits,
# then it is likely that the string is similar to ' before 3 days'
# or 'evening prior to 3 days'.
# In this case, the total time is calculated by subtracting '3 days'
# from the current date.
# So, we have to identify the quantity and negate it before parsing
# the string.
# This is not required for strings not starting with digits since the
# string is enough to calculate the sourceTime
if chunk2 != '':
if offset < 0:
m = re.match(digit, chunk2.strip())
if m is not None:
qty = int(m.group()) * -1
chunk2 = chunk2[m.end():]
chunk2 = '%d%s' % (qty, chunk2)
sourceTime, flag1 = self.parse(chunk2, sourceTime)
if flag1 == 0:
flag1 = True
else:
flag1 = False
flag2 = False
else:
flag1 = False
if chunk1 != '':
if offset < 0:
m = re.search(digit, chunk1.strip())
if m is not None:
qty = int(m.group()) * -1
chunk1 = chunk1[m.end():]
chunk1 = '%d%s' % (qty, chunk1)
tempDateFlag = self.dateFlag
tempTimeFlag = self.timeFlag
sourceTime2, flag2 = self.parse(chunk1, sourceTime)
else:
return sourceTime, (flag1 and flag2)
# if chunk1 is not a datetime and chunk2 is then do not use datetime
# value returned by parsing chunk1
if not (flag1 == False and flag2 == 0):
sourceTime = sourceTime2
else:
self.timeFlag = tempTimeFlag
self.dateFlag = tempDateFlag
return sourceTime, (flag1 and flag2)
|
[
"def",
"_evalModifier2",
"(",
"self",
",",
"modifier",
",",
"chunk1",
",",
"chunk2",
",",
"sourceTime",
")",
":",
"offset",
"=",
"self",
".",
"ptc",
".",
"Modifiers",
"[",
"modifier",
"]",
"digit",
"=",
"r'\\d+'",
"self",
".",
"modifier2Flag",
"=",
"False",
"# If the string after the negative modifier starts with digits,\r",
"# then it is likely that the string is similar to ' before 3 days'\r",
"# or 'evening prior to 3 days'.\r",
"# In this case, the total time is calculated by subtracting '3 days'\r",
"# from the current date.\r",
"# So, we have to identify the quantity and negate it before parsing\r",
"# the string.\r",
"# This is not required for strings not starting with digits since the\r",
"# string is enough to calculate the sourceTime\r",
"if",
"chunk2",
"!=",
"''",
":",
"if",
"offset",
"<",
"0",
":",
"m",
"=",
"re",
".",
"match",
"(",
"digit",
",",
"chunk2",
".",
"strip",
"(",
")",
")",
"if",
"m",
"is",
"not",
"None",
":",
"qty",
"=",
"int",
"(",
"m",
".",
"group",
"(",
")",
")",
"*",
"-",
"1",
"chunk2",
"=",
"chunk2",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"chunk2",
"=",
"'%d%s'",
"%",
"(",
"qty",
",",
"chunk2",
")",
"sourceTime",
",",
"flag1",
"=",
"self",
".",
"parse",
"(",
"chunk2",
",",
"sourceTime",
")",
"if",
"flag1",
"==",
"0",
":",
"flag1",
"=",
"True",
"else",
":",
"flag1",
"=",
"False",
"flag2",
"=",
"False",
"else",
":",
"flag1",
"=",
"False",
"if",
"chunk1",
"!=",
"''",
":",
"if",
"offset",
"<",
"0",
":",
"m",
"=",
"re",
".",
"search",
"(",
"digit",
",",
"chunk1",
".",
"strip",
"(",
")",
")",
"if",
"m",
"is",
"not",
"None",
":",
"qty",
"=",
"int",
"(",
"m",
".",
"group",
"(",
")",
")",
"*",
"-",
"1",
"chunk1",
"=",
"chunk1",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"chunk1",
"=",
"'%d%s'",
"%",
"(",
"qty",
",",
"chunk1",
")",
"tempDateFlag",
"=",
"self",
".",
"dateFlag",
"tempTimeFlag",
"=",
"self",
".",
"timeFlag",
"sourceTime2",
",",
"flag2",
"=",
"self",
".",
"parse",
"(",
"chunk1",
",",
"sourceTime",
")",
"else",
":",
"return",
"sourceTime",
",",
"(",
"flag1",
"and",
"flag2",
")",
"# if chunk1 is not a datetime and chunk2 is then do not use datetime\r",
"# value returned by parsing chunk1\r",
"if",
"not",
"(",
"flag1",
"==",
"False",
"and",
"flag2",
"==",
"0",
")",
":",
"sourceTime",
"=",
"sourceTime2",
"else",
":",
"self",
".",
"timeFlag",
"=",
"tempTimeFlag",
"self",
".",
"dateFlag",
"=",
"tempDateFlag",
"return",
"sourceTime",
",",
"(",
"flag1",
"and",
"flag2",
")"
] |
Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to C{sourceTime}
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime
|
[
"Evaluate",
"the",
"C",
"{",
"modifier",
"}",
"string",
"and",
"following",
"text",
"(",
"passed",
"in",
"as",
"C",
"{",
"chunk1",
"}",
"and",
"C",
"{",
"chunk2",
"}",
")",
"and",
"if",
"they",
"match",
"any",
"known",
"modifiers",
"calculate",
"the",
"delta",
"and",
"apply",
"it",
"to",
"C",
"{",
"sourceTime",
"}",
"."
] |
python
|
train
|
audreyr/cookiecutter
|
cookiecutter/prompt.py
|
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L113-L136
|
def read_user_dict(var_name, default_value):
"""Prompt the user to provide a dictionary of data.
:param str var_name: Variable as specified in the context
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
"""
# Please see http://click.pocoo.org/4/api/#click.prompt
if not isinstance(default_value, dict):
raise TypeError
default_display = 'default'
user_value = click.prompt(
var_name,
default=default_display,
type=click.STRING,
value_proc=process_json,
)
if user_value == default_display:
# Return the given default w/o any processing
return default_value
return user_value
|
[
"def",
"read_user_dict",
"(",
"var_name",
",",
"default_value",
")",
":",
"# Please see http://click.pocoo.org/4/api/#click.prompt",
"if",
"not",
"isinstance",
"(",
"default_value",
",",
"dict",
")",
":",
"raise",
"TypeError",
"default_display",
"=",
"'default'",
"user_value",
"=",
"click",
".",
"prompt",
"(",
"var_name",
",",
"default",
"=",
"default_display",
",",
"type",
"=",
"click",
".",
"STRING",
",",
"value_proc",
"=",
"process_json",
",",
")",
"if",
"user_value",
"==",
"default_display",
":",
"# Return the given default w/o any processing",
"return",
"default_value",
"return",
"user_value"
] |
Prompt the user to provide a dictionary of data.
:param str var_name: Variable as specified in the context
:param default_value: Value that will be returned if no input is provided
:return: A Python dictionary to use in the context.
|
[
"Prompt",
"the",
"user",
"to",
"provide",
"a",
"dictionary",
"of",
"data",
"."
] |
python
|
train
|
Genida/archan
|
src/archan/dsm.py
|
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/dsm.py#L15-L21
|
def validate_rows_length(data, length, message=None, exception=MatrixError):
"""Validate that all rows have the same length."""
if message is None:
message = 'All rows must have the same length (same number of columns)'
for row in data:
if len(row) != length:
raise exception(message)
|
[
"def",
"validate_rows_length",
"(",
"data",
",",
"length",
",",
"message",
"=",
"None",
",",
"exception",
"=",
"MatrixError",
")",
":",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"'All rows must have the same length (same number of columns)'",
"for",
"row",
"in",
"data",
":",
"if",
"len",
"(",
"row",
")",
"!=",
"length",
":",
"raise",
"exception",
"(",
"message",
")"
] |
Validate that all rows have the same length.
|
[
"Validate",
"that",
"all",
"rows",
"have",
"the",
"same",
"length",
"."
] |
python
|
train
|
inveniosoftware-attic/invenio-knowledge
|
docs/_ext/flask_app.py
|
https://github.com/inveniosoftware-attic/invenio-knowledge/blob/b31722dc14243ca8f626f8b3bce9718d0119de55/docs/_ext/flask_app.py#L23-L33
|
def setup(sphinx):
"""Setup Sphinx object."""
from flask import has_app_context
from invenio_base.factory import create_app
PACKAGES = ['invenio_base', 'invenio.modules.accounts',
'invenio.modules.records', 'invenio_knowledge']
if not has_app_context():
app = create_app(PACKAGES=PACKAGES)
ctx = app.test_request_context('/')
ctx.push()
|
[
"def",
"setup",
"(",
"sphinx",
")",
":",
"from",
"flask",
"import",
"has_app_context",
"from",
"invenio_base",
".",
"factory",
"import",
"create_app",
"PACKAGES",
"=",
"[",
"'invenio_base'",
",",
"'invenio.modules.accounts'",
",",
"'invenio.modules.records'",
",",
"'invenio_knowledge'",
"]",
"if",
"not",
"has_app_context",
"(",
")",
":",
"app",
"=",
"create_app",
"(",
"PACKAGES",
"=",
"PACKAGES",
")",
"ctx",
"=",
"app",
".",
"test_request_context",
"(",
"'/'",
")",
"ctx",
".",
"push",
"(",
")"
] |
Setup Sphinx object.
|
[
"Setup",
"Sphinx",
"object",
"."
] |
python
|
train
|
pygobject/pgi
|
pgi/codegen/funcgen.py
|
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/codegen/funcgen.py#L23-L36
|
def may_be_null_is_nullable():
"""If may_be_null returns nullable or if NULL can be passed in.
This can still be wrong if the specific typelib is older than the linked
libgirepository.
https://bugzilla.gnome.org/show_bug.cgi?id=660879#c47
"""
repo = GIRepository()
repo.require("GLib", "2.0", 0)
info = repo.find_by_name("GLib", "spawn_sync")
# this argument is (allow-none) and can never be (nullable)
return not info.get_arg(8).may_be_null
|
[
"def",
"may_be_null_is_nullable",
"(",
")",
":",
"repo",
"=",
"GIRepository",
"(",
")",
"repo",
".",
"require",
"(",
"\"GLib\"",
",",
"\"2.0\"",
",",
"0",
")",
"info",
"=",
"repo",
".",
"find_by_name",
"(",
"\"GLib\"",
",",
"\"spawn_sync\"",
")",
"# this argument is (allow-none) and can never be (nullable)",
"return",
"not",
"info",
".",
"get_arg",
"(",
"8",
")",
".",
"may_be_null"
] |
If may_be_null returns nullable or if NULL can be passed in.
This can still be wrong if the specific typelib is older than the linked
libgirepository.
https://bugzilla.gnome.org/show_bug.cgi?id=660879#c47
|
[
"If",
"may_be_null",
"returns",
"nullable",
"or",
"if",
"NULL",
"can",
"be",
"passed",
"in",
"."
] |
python
|
train
|
thebigmunch/gmusicapi-wrapper
|
gmusicapi_wrapper/utils.py
|
https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/utils.py#L253-L301
|
def filter_local_songs(filepaths, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Match a local file against a set of metadata filters.
Parameters:
filepaths (list): Filepaths to filter.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields.
Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields.
Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of local song filepaths matching criteria and
a list of local song filepaths filtered out using filter criteria.
Invalid music files are also filtered out.
::
(matched, filtered)
"""
matched_songs = []
filtered_songs = []
for filepath in filepaths:
try:
song = _get_mutagen_metadata(filepath)
except mutagen.MutagenError:
filtered_songs.append(filepath)
else:
if include_filters or exclude_filters:
if _check_filters(
song, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes):
matched_songs.append(filepath)
else:
filtered_songs.append(filepath)
else:
matched_songs.append(filepath)
return matched_songs, filtered_songs
|
[
"def",
"filter_local_songs",
"(",
"filepaths",
",",
"include_filters",
"=",
"None",
",",
"exclude_filters",
"=",
"None",
",",
"all_includes",
"=",
"False",
",",
"all_excludes",
"=",
"False",
")",
":",
"matched_songs",
"=",
"[",
"]",
"filtered_songs",
"=",
"[",
"]",
"for",
"filepath",
"in",
"filepaths",
":",
"try",
":",
"song",
"=",
"_get_mutagen_metadata",
"(",
"filepath",
")",
"except",
"mutagen",
".",
"MutagenError",
":",
"filtered_songs",
".",
"append",
"(",
"filepath",
")",
"else",
":",
"if",
"include_filters",
"or",
"exclude_filters",
":",
"if",
"_check_filters",
"(",
"song",
",",
"include_filters",
"=",
"include_filters",
",",
"exclude_filters",
"=",
"exclude_filters",
",",
"all_includes",
"=",
"all_includes",
",",
"all_excludes",
"=",
"all_excludes",
")",
":",
"matched_songs",
".",
"append",
"(",
"filepath",
")",
"else",
":",
"filtered_songs",
".",
"append",
"(",
"filepath",
")",
"else",
":",
"matched_songs",
".",
"append",
"(",
"filepath",
")",
"return",
"matched_songs",
",",
"filtered_songs"
] |
Match a local file against a set of metadata filters.
Parameters:
filepaths (list): Filepaths to filter.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields.
Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields.
Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of local song filepaths matching criteria and
a list of local song filepaths filtered out using filter criteria.
Invalid music files are also filtered out.
::
(matched, filtered)
|
[
"Match",
"a",
"local",
"file",
"against",
"a",
"set",
"of",
"metadata",
"filters",
"."
] |
python
|
valid
|
JdeRobot/base
|
src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_swift.py
|
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/generator/mavgen_swift.py#L304-L316
|
def camel_case_from_underscores(string):
"""Generate a CamelCase string from an underscore_string"""
components = string.split('_')
string = ''
for component in components:
if component in abbreviations:
string += component
else:
string += component[0].upper() + component[1:].lower()
return string
|
[
"def",
"camel_case_from_underscores",
"(",
"string",
")",
":",
"components",
"=",
"string",
".",
"split",
"(",
"'_'",
")",
"string",
"=",
"''",
"for",
"component",
"in",
"components",
":",
"if",
"component",
"in",
"abbreviations",
":",
"string",
"+=",
"component",
"else",
":",
"string",
"+=",
"component",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"component",
"[",
"1",
":",
"]",
".",
"lower",
"(",
")",
"return",
"string"
] |
Generate a CamelCase string from an underscore_string
|
[
"Generate",
"a",
"CamelCase",
"string",
"from",
"an",
"underscore_string"
] |
python
|
train
|
brocade/pynos
|
pynos/versions/ver_6/ver_6_0_1/yang/brocade_span.py
|
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_span.py#L68-L81
|
def monitor_session_span_command_src_tengigabitethernet_val(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
monitor = ET.SubElement(config, "monitor", xmlns="urn:brocade.com:mgmt:brocade-span")
session = ET.SubElement(monitor, "session")
session_number_key = ET.SubElement(session, "session-number")
session_number_key.text = kwargs.pop('session_number')
span_command = ET.SubElement(session, "span-command")
src_tengigabitethernet_val = ET.SubElement(span_command, "src-tengigabitethernet-val")
src_tengigabitethernet_val.text = kwargs.pop('src_tengigabitethernet_val')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
[
"def",
"monitor_session_span_command_src_tengigabitethernet_val",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"monitor",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"monitor\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-span\"",
")",
"session",
"=",
"ET",
".",
"SubElement",
"(",
"monitor",
",",
"\"session\"",
")",
"session_number_key",
"=",
"ET",
".",
"SubElement",
"(",
"session",
",",
"\"session-number\"",
")",
"session_number_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'session_number'",
")",
"span_command",
"=",
"ET",
".",
"SubElement",
"(",
"session",
",",
"\"span-command\"",
")",
"src_tengigabitethernet_val",
"=",
"ET",
".",
"SubElement",
"(",
"span_command",
",",
"\"src-tengigabitethernet-val\"",
")",
"src_tengigabitethernet_val",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'src_tengigabitethernet_val'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] |
Auto Generated Code
|
[
"Auto",
"Generated",
"Code"
] |
python
|
train
|
kakwa/ldapcherry
|
ldapcherry/__init__.py
|
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/__init__.py#L531-L552
|
def _parse_params(self, params):
""" get user attributes
@dict params: form parameters
@rtype: dict, {<type>: {<attr>: <value>}}
"""
ret = {'attrs': {}, 'roles': {}, 'groups': {}}
for p in params:
# each form attributes is prefixed with type, ex: attr.uidNumber
# separate this prefix from the attribute name
p_type, sep, param = p.partition('.')
if p_type == 'attr':
ret['attrs'][param] = params[p]
elif p_type == 'role':
ret['roles'][param] = params[p]
elif p_type == 'group':
# with groups there is a second prefix
# corresponding to the backend
backend, sep, value = param.partition('.')
if backend not in ret['groups']:
ret['groups'][backend] = []
ret['groups'][backend].append(value)
return ret
|
[
"def",
"_parse_params",
"(",
"self",
",",
"params",
")",
":",
"ret",
"=",
"{",
"'attrs'",
":",
"{",
"}",
",",
"'roles'",
":",
"{",
"}",
",",
"'groups'",
":",
"{",
"}",
"}",
"for",
"p",
"in",
"params",
":",
"# each form attributes is prefixed with type, ex: attr.uidNumber",
"# separate this prefix from the attribute name",
"p_type",
",",
"sep",
",",
"param",
"=",
"p",
".",
"partition",
"(",
"'.'",
")",
"if",
"p_type",
"==",
"'attr'",
":",
"ret",
"[",
"'attrs'",
"]",
"[",
"param",
"]",
"=",
"params",
"[",
"p",
"]",
"elif",
"p_type",
"==",
"'role'",
":",
"ret",
"[",
"'roles'",
"]",
"[",
"param",
"]",
"=",
"params",
"[",
"p",
"]",
"elif",
"p_type",
"==",
"'group'",
":",
"# with groups there is a second prefix",
"# corresponding to the backend",
"backend",
",",
"sep",
",",
"value",
"=",
"param",
".",
"partition",
"(",
"'.'",
")",
"if",
"backend",
"not",
"in",
"ret",
"[",
"'groups'",
"]",
":",
"ret",
"[",
"'groups'",
"]",
"[",
"backend",
"]",
"=",
"[",
"]",
"ret",
"[",
"'groups'",
"]",
"[",
"backend",
"]",
".",
"append",
"(",
"value",
")",
"return",
"ret"
] |
get user attributes
@dict params: form parameters
@rtype: dict, {<type>: {<attr>: <value>}}
|
[
"get",
"user",
"attributes"
] |
python
|
train
|
kakwa/ldapcherry
|
ldapcherry/backend/backendLdap.py
|
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/backend/backendLdap.py#L179-L192
|
def _normalize_group_attrs(self, attrs):
"""Normalize the attributes used to set groups
If it's a list of one element, it just become this
element.
It raises an error if the attribute doesn't exist
or if it's multivaluated.
"""
for key in self.group_attrs_keys:
if key not in attrs:
raise MissingGroupAttr(key)
if type(attrs[key]) is list and len(attrs[key]) == 1:
attrs[key] = attrs[key][0]
if type(attrs[key]) is list and len(attrs[key]) != 1:
raise MultivaluedGroupAttr(key)
|
[
"def",
"_normalize_group_attrs",
"(",
"self",
",",
"attrs",
")",
":",
"for",
"key",
"in",
"self",
".",
"group_attrs_keys",
":",
"if",
"key",
"not",
"in",
"attrs",
":",
"raise",
"MissingGroupAttr",
"(",
"key",
")",
"if",
"type",
"(",
"attrs",
"[",
"key",
"]",
")",
"is",
"list",
"and",
"len",
"(",
"attrs",
"[",
"key",
"]",
")",
"==",
"1",
":",
"attrs",
"[",
"key",
"]",
"=",
"attrs",
"[",
"key",
"]",
"[",
"0",
"]",
"if",
"type",
"(",
"attrs",
"[",
"key",
"]",
")",
"is",
"list",
"and",
"len",
"(",
"attrs",
"[",
"key",
"]",
")",
"!=",
"1",
":",
"raise",
"MultivaluedGroupAttr",
"(",
"key",
")"
] |
Normalize the attributes used to set groups
If it's a list of one element, it just become this
element.
It raises an error if the attribute doesn't exist
or if it's multivaluated.
|
[
"Normalize",
"the",
"attributes",
"used",
"to",
"set",
"groups",
"If",
"it",
"s",
"a",
"list",
"of",
"one",
"element",
"it",
"just",
"become",
"this",
"element",
".",
"It",
"raises",
"an",
"error",
"if",
"the",
"attribute",
"doesn",
"t",
"exist",
"or",
"if",
"it",
"s",
"multivaluated",
"."
] |
python
|
train
|
MolSSI-BSE/basis_set_exchange
|
basis_set_exchange/api.py
|
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/api.py#L398-L404
|
def get_basis_family(basis_name, data_dir=None):
'''Lookup a family by a basis set name
'''
data_dir = fix_data_dir(data_dir)
bs_data = _get_basis_metadata(basis_name, data_dir)
return bs_data['family']
|
[
"def",
"get_basis_family",
"(",
"basis_name",
",",
"data_dir",
"=",
"None",
")",
":",
"data_dir",
"=",
"fix_data_dir",
"(",
"data_dir",
")",
"bs_data",
"=",
"_get_basis_metadata",
"(",
"basis_name",
",",
"data_dir",
")",
"return",
"bs_data",
"[",
"'family'",
"]"
] |
Lookup a family by a basis set name
|
[
"Lookup",
"a",
"family",
"by",
"a",
"basis",
"set",
"name"
] |
python
|
train
|
Fantomas42/django-blog-zinnia
|
zinnia/templatetags/zinnia.py
|
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L435-L442
|
def user_admin_urlname(action):
"""
Return the admin URLs for the user app used.
"""
user = get_user_model()
return 'admin:%s_%s_%s' % (
user._meta.app_label, user._meta.model_name,
action)
|
[
"def",
"user_admin_urlname",
"(",
"action",
")",
":",
"user",
"=",
"get_user_model",
"(",
")",
"return",
"'admin:%s_%s_%s'",
"%",
"(",
"user",
".",
"_meta",
".",
"app_label",
",",
"user",
".",
"_meta",
".",
"model_name",
",",
"action",
")"
] |
Return the admin URLs for the user app used.
|
[
"Return",
"the",
"admin",
"URLs",
"for",
"the",
"user",
"app",
"used",
"."
] |
python
|
train
|
SBRG/ssbio
|
ssbio/biopython/Bio/Struct/Hydrogenate.py
|
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/biopython/Bio/Struct/Hydrogenate.py#L243-L325
|
def _add_1(self, hydrogen_name, heavy_atom, bonds):
"""
Adds the missing proton to single protonated heavy_atoms.
"""
residue = heavy_atom.parent
ffld = self.ffld[self.selection[residue]]
bnd_len = self.bondfield.length
anchor = bonds[0]
# If not linear
if self.bondfield.nonlinear.has_key(ffld[( residue.resname,
heavy_atom.name)]['type']):
# returns tuple of two atoms
bonded = self._find_secondary_anchors(residue, heavy_atom, anchor)
if bonded:
# Phenolic hydrogens, etc.
if self.bondfield.planer.has_key(ffld[( residue.resname,
anchor.name)]['type']):
secondary_anchor = bonded[0]
p0 = heavy_atom.coord - anchor.coord
d2 = secondary_anchor.coord - anchor.coord
p1 = normalize(cross_product(d2, p0))
p2 = normalize(cross_product(p0, p1))
vector = scale(p2, TRI_TAN)
vector = normalize(add(p0, vector))
hydrogen_coord = add(heavy_atom.coord,
scale(vector,
bnd_len[(ffld[(residue.resname,
heavy_atom.name)]['type'],
ffld[(residue.resname,
hydrogen_name)]['type'])]))
else: # Ser, Cys, Thr hydroxyl hydrogens
secondary_anchor = bonded[0]
vector = anchor.coord - secondary_anchor.coord
hydrogen_coord = add(heavy_atom.coord,
scale(vector,
bnd_len[(ffld[(residue.resname, heavy_atom.name)]['type'],
ffld[(residue.resname, hydrogen_name)]['type'])] ))
elif len(bonds):
d2 = [1.0, 0, 0]
p0 = heavy_atom.coord - anchor.coord
p1 = normalize(cross_product(d2, p0))
vector = scale(p1, TET_TAN)
vector = normalize(add(p0, vector))
hydrogen_coord = add(heavy_atom.coord,
scale( vector,
bnd_len[(ffld[(residue.resname, heavy_atom.name)]['type'],
ffld[(residue.resname, hydrogen_name)]['type'])] ))
else:
hydrogen_coord = random_sphere(heavy_atom.coord,
bnd_len[ (ffld[(residue.resname, heavy_atom.name)]['type'],
ffld[(residue.resname, hydrogen_name)]['type']) ])
elif len(bonds): # linear sum...amide, tbu, etc
vector = [0.0, 0.0, 0.0]
if heavy_atom.name == 'N': # Fix to get previous atom O from peptide bond. Ugly.
prev_res = list(residue.get_id())
prev_res[1] -= 1
prev_res = tuple(prev_res)
if residue.parent.child_dict.has_key(prev_res):
prev_res = residue.parent.child_dict[prev_res]
bonds.append(prev_res.child_dict['O'])
for b in bonds:
d = heavy_atom.coord - b.coord
vector = add(vector, d)
vector = normalize(vector)
hydrogen_coord = add(heavy_atom.coord,
scale(vector,
bnd_len[(ffld[(residue.resname, heavy_atom.name)]['type'],
ffld[(residue.resname, hydrogen_name)]['type']) ]))
else:
hydrogen_coord = random_sphere(heavy_atom.coord,
bnd_len[ (ffld[(residue.resname, heavy_atom.name)]['type'],
ffld[(residue.resname, hydrogen_name)]['type']) ])
return hydrogen_coord
|
[
"def",
"_add_1",
"(",
"self",
",",
"hydrogen_name",
",",
"heavy_atom",
",",
"bonds",
")",
":",
"residue",
"=",
"heavy_atom",
".",
"parent",
"ffld",
"=",
"self",
".",
"ffld",
"[",
"self",
".",
"selection",
"[",
"residue",
"]",
"]",
"bnd_len",
"=",
"self",
".",
"bondfield",
".",
"length",
"anchor",
"=",
"bonds",
"[",
"0",
"]",
"# If not linear",
"if",
"self",
".",
"bondfield",
".",
"nonlinear",
".",
"has_key",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
")",
":",
"# returns tuple of two atoms",
"bonded",
"=",
"self",
".",
"_find_secondary_anchors",
"(",
"residue",
",",
"heavy_atom",
",",
"anchor",
")",
"if",
"bonded",
":",
"# Phenolic hydrogens, etc.",
"if",
"self",
".",
"bondfield",
".",
"planer",
".",
"has_key",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"anchor",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
")",
":",
"secondary_anchor",
"=",
"bonded",
"[",
"0",
"]",
"p0",
"=",
"heavy_atom",
".",
"coord",
"-",
"anchor",
".",
"coord",
"d2",
"=",
"secondary_anchor",
".",
"coord",
"-",
"anchor",
".",
"coord",
"p1",
"=",
"normalize",
"(",
"cross_product",
"(",
"d2",
",",
"p0",
")",
")",
"p2",
"=",
"normalize",
"(",
"cross_product",
"(",
"p0",
",",
"p1",
")",
")",
"vector",
"=",
"scale",
"(",
"p2",
",",
"TRI_TAN",
")",
"vector",
"=",
"normalize",
"(",
"add",
"(",
"p0",
",",
"vector",
")",
")",
"hydrogen_coord",
"=",
"add",
"(",
"heavy_atom",
".",
"coord",
",",
"scale",
"(",
"vector",
",",
"bnd_len",
"[",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
",",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"hydrogen_name",
")",
"]",
"[",
"'type'",
"]",
")",
"]",
")",
")",
"else",
":",
"# Ser, Cys, Thr hydroxyl hydrogens",
"secondary_anchor",
"=",
"bonded",
"[",
"0",
"]",
"vector",
"=",
"anchor",
".",
"coord",
"-",
"secondary_anchor",
".",
"coord",
"hydrogen_coord",
"=",
"add",
"(",
"heavy_atom",
".",
"coord",
",",
"scale",
"(",
"vector",
",",
"bnd_len",
"[",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
",",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"hydrogen_name",
")",
"]",
"[",
"'type'",
"]",
")",
"]",
")",
")",
"elif",
"len",
"(",
"bonds",
")",
":",
"d2",
"=",
"[",
"1.0",
",",
"0",
",",
"0",
"]",
"p0",
"=",
"heavy_atom",
".",
"coord",
"-",
"anchor",
".",
"coord",
"p1",
"=",
"normalize",
"(",
"cross_product",
"(",
"d2",
",",
"p0",
")",
")",
"vector",
"=",
"scale",
"(",
"p1",
",",
"TET_TAN",
")",
"vector",
"=",
"normalize",
"(",
"add",
"(",
"p0",
",",
"vector",
")",
")",
"hydrogen_coord",
"=",
"add",
"(",
"heavy_atom",
".",
"coord",
",",
"scale",
"(",
"vector",
",",
"bnd_len",
"[",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
",",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"hydrogen_name",
")",
"]",
"[",
"'type'",
"]",
")",
"]",
")",
")",
"else",
":",
"hydrogen_coord",
"=",
"random_sphere",
"(",
"heavy_atom",
".",
"coord",
",",
"bnd_len",
"[",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
",",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"hydrogen_name",
")",
"]",
"[",
"'type'",
"]",
")",
"]",
")",
"elif",
"len",
"(",
"bonds",
")",
":",
"# linear sum...amide, tbu, etc",
"vector",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
"if",
"heavy_atom",
".",
"name",
"==",
"'N'",
":",
"# Fix to get previous atom O from peptide bond. Ugly.",
"prev_res",
"=",
"list",
"(",
"residue",
".",
"get_id",
"(",
")",
")",
"prev_res",
"[",
"1",
"]",
"-=",
"1",
"prev_res",
"=",
"tuple",
"(",
"prev_res",
")",
"if",
"residue",
".",
"parent",
".",
"child_dict",
".",
"has_key",
"(",
"prev_res",
")",
":",
"prev_res",
"=",
"residue",
".",
"parent",
".",
"child_dict",
"[",
"prev_res",
"]",
"bonds",
".",
"append",
"(",
"prev_res",
".",
"child_dict",
"[",
"'O'",
"]",
")",
"for",
"b",
"in",
"bonds",
":",
"d",
"=",
"heavy_atom",
".",
"coord",
"-",
"b",
".",
"coord",
"vector",
"=",
"add",
"(",
"vector",
",",
"d",
")",
"vector",
"=",
"normalize",
"(",
"vector",
")",
"hydrogen_coord",
"=",
"add",
"(",
"heavy_atom",
".",
"coord",
",",
"scale",
"(",
"vector",
",",
"bnd_len",
"[",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
",",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"hydrogen_name",
")",
"]",
"[",
"'type'",
"]",
")",
"]",
")",
")",
"else",
":",
"hydrogen_coord",
"=",
"random_sphere",
"(",
"heavy_atom",
".",
"coord",
",",
"bnd_len",
"[",
"(",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"heavy_atom",
".",
"name",
")",
"]",
"[",
"'type'",
"]",
",",
"ffld",
"[",
"(",
"residue",
".",
"resname",
",",
"hydrogen_name",
")",
"]",
"[",
"'type'",
"]",
")",
"]",
")",
"return",
"hydrogen_coord"
] |
Adds the missing proton to single protonated heavy_atoms.
|
[
"Adds",
"the",
"missing",
"proton",
"to",
"single",
"protonated",
"heavy_atoms",
"."
] |
python
|
train
|
inasafe/inasafe
|
safe/report/processors/default.py
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/processors/default.py#L60-L90
|
def layout_item(layout, item_id, item_class):
"""Fetch a specific item according to its type in a layout.
There's some sip casting conversion issues with QgsLayout::itemById.
Don't use it, and use this function instead.
See https://github.com/inasafe/inasafe/issues/4271
:param layout: The layout to look in.
:type layout: QgsLayout
:param item_id: The ID of the item to look for.
:type item_id: basestring
:param item_class: The expected class name.
:type item_class: cls
:return: The layout item, inherited class of QgsLayoutItem.
"""
item = layout.itemById(item_id)
if item is None:
# no match!
return item
if issubclass(item_class, QgsLayoutMultiFrame):
# finding a multiframe by frame id
frame = sip.cast(item, QgsLayoutFrame)
multi_frame = frame.multiFrame()
return sip.cast(multi_frame, item_class)
else:
# force sip to correctly cast item to required type
return sip.cast(item, item_class)
|
[
"def",
"layout_item",
"(",
"layout",
",",
"item_id",
",",
"item_class",
")",
":",
"item",
"=",
"layout",
".",
"itemById",
"(",
"item_id",
")",
"if",
"item",
"is",
"None",
":",
"# no match!",
"return",
"item",
"if",
"issubclass",
"(",
"item_class",
",",
"QgsLayoutMultiFrame",
")",
":",
"# finding a multiframe by frame id",
"frame",
"=",
"sip",
".",
"cast",
"(",
"item",
",",
"QgsLayoutFrame",
")",
"multi_frame",
"=",
"frame",
".",
"multiFrame",
"(",
")",
"return",
"sip",
".",
"cast",
"(",
"multi_frame",
",",
"item_class",
")",
"else",
":",
"# force sip to correctly cast item to required type",
"return",
"sip",
".",
"cast",
"(",
"item",
",",
"item_class",
")"
] |
Fetch a specific item according to its type in a layout.
There's some sip casting conversion issues with QgsLayout::itemById.
Don't use it, and use this function instead.
See https://github.com/inasafe/inasafe/issues/4271
:param layout: The layout to look in.
:type layout: QgsLayout
:param item_id: The ID of the item to look for.
:type item_id: basestring
:param item_class: The expected class name.
:type item_class: cls
:return: The layout item, inherited class of QgsLayoutItem.
|
[
"Fetch",
"a",
"specific",
"item",
"according",
"to",
"its",
"type",
"in",
"a",
"layout",
"."
] |
python
|
train
|
wiheto/teneto
|
teneto/classes/network.py
|
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L227-L243
|
def network_from_edgelist(self, edgelist):
"""
Defines a network from an array.
Parameters
----------
edgelist : list of lists.
A list of lists which are 3 or 4 in length. For binary networks each sublist should be [i, j ,t] where i and j are node indicies and t is the temporal index.
For weighted networks each sublist should be [i, j, t, weight].
"""
teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist')
if len(edgelist[0]) == 4:
colnames = ['i', 'j', 't', 'weight']
elif len(edgelist[0]) == 3:
colnames = ['i', 'j', 't']
self.network = pd.DataFrame(edgelist, columns=colnames)
self._update_network()
|
[
"def",
"network_from_edgelist",
"(",
"self",
",",
"edgelist",
")",
":",
"teneto",
".",
"utils",
".",
"check_TemporalNetwork_input",
"(",
"edgelist",
",",
"'edgelist'",
")",
"if",
"len",
"(",
"edgelist",
"[",
"0",
"]",
")",
"==",
"4",
":",
"colnames",
"=",
"[",
"'i'",
",",
"'j'",
",",
"'t'",
",",
"'weight'",
"]",
"elif",
"len",
"(",
"edgelist",
"[",
"0",
"]",
")",
"==",
"3",
":",
"colnames",
"=",
"[",
"'i'",
",",
"'j'",
",",
"'t'",
"]",
"self",
".",
"network",
"=",
"pd",
".",
"DataFrame",
"(",
"edgelist",
",",
"columns",
"=",
"colnames",
")",
"self",
".",
"_update_network",
"(",
")"
] |
Defines a network from an array.
Parameters
----------
edgelist : list of lists.
A list of lists which are 3 or 4 in length. For binary networks each sublist should be [i, j ,t] where i and j are node indicies and t is the temporal index.
For weighted networks each sublist should be [i, j, t, weight].
|
[
"Defines",
"a",
"network",
"from",
"an",
"array",
"."
] |
python
|
train
|
ergoithz/browsepy
|
browsepy/compat.py
|
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/compat.py#L84-L97
|
def getcwd(fs_encoding=FS_ENCODING, cwd_fnc=os.getcwd):
'''
Get current work directory's absolute path.
Like os.getcwd but garanteed to return an unicode-str object.
:param fs_encoding: filesystem encoding, defaults to autodetected
:type fs_encoding: str
:param cwd_fnc: callable used to get the path, defaults to os.getcwd
:type cwd_fnc: Callable
:return: path
:rtype: str
'''
path = fsdecode(cwd_fnc(), fs_encoding=fs_encoding)
return os.path.abspath(path)
|
[
"def",
"getcwd",
"(",
"fs_encoding",
"=",
"FS_ENCODING",
",",
"cwd_fnc",
"=",
"os",
".",
"getcwd",
")",
":",
"path",
"=",
"fsdecode",
"(",
"cwd_fnc",
"(",
")",
",",
"fs_encoding",
"=",
"fs_encoding",
")",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")"
] |
Get current work directory's absolute path.
Like os.getcwd but garanteed to return an unicode-str object.
:param fs_encoding: filesystem encoding, defaults to autodetected
:type fs_encoding: str
:param cwd_fnc: callable used to get the path, defaults to os.getcwd
:type cwd_fnc: Callable
:return: path
:rtype: str
|
[
"Get",
"current",
"work",
"directory",
"s",
"absolute",
"path",
".",
"Like",
"os",
".",
"getcwd",
"but",
"garanteed",
"to",
"return",
"an",
"unicode",
"-",
"str",
"object",
"."
] |
python
|
train
|
ubc/ubcpi
|
ubcpi/persistence.py
|
https://github.com/ubc/ubcpi/blob/7b6de03f93f3a4a8af4b92dfde7c69eeaf21f46e/ubcpi/persistence.py#L134-L145
|
def add_answer(self, vote, rationale):
"""
Add an answer
Args:
vote (int): the option that student voted for
rationale (str): the reason why the student vote for the option
"""
self.raw_answers.append({
VOTE_KEY: vote,
RATIONALE_KEY: rationale,
})
|
[
"def",
"add_answer",
"(",
"self",
",",
"vote",
",",
"rationale",
")",
":",
"self",
".",
"raw_answers",
".",
"append",
"(",
"{",
"VOTE_KEY",
":",
"vote",
",",
"RATIONALE_KEY",
":",
"rationale",
",",
"}",
")"
] |
Add an answer
Args:
vote (int): the option that student voted for
rationale (str): the reason why the student vote for the option
|
[
"Add",
"an",
"answer"
] |
python
|
train
|
heikomuller/sco-datastore
|
scodata/funcdata.py
|
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/funcdata.py#L178-L236
|
def create_object(self, filename, read_only=False):
"""Create a functional data object for the given file. Expects the file
to be a valid functional data file. Expects exactly one file that has
suffix mgh/mgz or nii/nii.gz.
Parameters
----------
filename : string
Name of the (uploaded) file
read_only : boolean, optional
Optional value for the read-only property
Returns
-------
FunctionalDataHandle
Handle for created functional data object in database
"""
# Get the file name, i.e., last component of the given absolute path
prop_name = os.path.basename(os.path.normpath(filename))
# Ensure that the uploaded file has a valid suffix. Currently no tests
# are performed to ensure that the file actually conatains any data.
if prop_name.endswith('.nii.gz') or prop_name.endswith('.mgz') or prop_name.endswith('.mgh.gz'):
prop_mime = 'application/x-gzip'
elif prop_name.endswith('.nii'):
prop_mime = 'application/NIfTI-1'
elif prop_name.endswith('.mgh'):
prop_mime = 'application/MGH'
else:
raise ValueError('unsupported file type: ' + prop_name)
# Create a new object identifier.
identifier = str(uuid.uuid4()).replace('-','')
# The object directory is given by the object identifier.
object_dir = os.path.join(self.directory, identifier)
# Create (sub-)directories for the uploaded and extracted data files.
if not os.access(object_dir, os.F_OK):
os.makedirs(object_dir)
data_dir = os.path.join(object_dir, DATA_DIRECTORY)
os.mkdir(data_dir)
func_data_file = prop_name
uploaded_file = os.path.join(data_dir, prop_name)
shutil.copyfile(filename, uploaded_file)
# Create the initial set of properties for the new image object.
properties = {
datastore.PROPERTY_NAME: prop_name,
datastore.PROPERTY_FILENAME : prop_name,
datastore.PROPERTY_FILESIZE : os.path.getsize(uploaded_file),
datastore.PROPERTY_MIMETYPE : prop_mime,
datastore.PROPERTY_FUNCDATAFILE : func_data_file
}
if read_only:
properties[datastore.PROPERTY_READONLY] = True
# Create object handle and store it in database before returning it
obj = FunctionalDataHandle(
identifier,
properties,
object_dir
)
self.insert_object(obj)
return obj
|
[
"def",
"create_object",
"(",
"self",
",",
"filename",
",",
"read_only",
"=",
"False",
")",
":",
"# Get the file name, i.e., last component of the given absolute path",
"prop_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"filename",
")",
")",
"# Ensure that the uploaded file has a valid suffix. Currently no tests",
"# are performed to ensure that the file actually conatains any data.",
"if",
"prop_name",
".",
"endswith",
"(",
"'.nii.gz'",
")",
"or",
"prop_name",
".",
"endswith",
"(",
"'.mgz'",
")",
"or",
"prop_name",
".",
"endswith",
"(",
"'.mgh.gz'",
")",
":",
"prop_mime",
"=",
"'application/x-gzip'",
"elif",
"prop_name",
".",
"endswith",
"(",
"'.nii'",
")",
":",
"prop_mime",
"=",
"'application/NIfTI-1'",
"elif",
"prop_name",
".",
"endswith",
"(",
"'.mgh'",
")",
":",
"prop_mime",
"=",
"'application/MGH'",
"else",
":",
"raise",
"ValueError",
"(",
"'unsupported file type: '",
"+",
"prop_name",
")",
"# Create a new object identifier.",
"identifier",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"# The object directory is given by the object identifier.",
"object_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"directory",
",",
"identifier",
")",
"# Create (sub-)directories for the uploaded and extracted data files.",
"if",
"not",
"os",
".",
"access",
"(",
"object_dir",
",",
"os",
".",
"F_OK",
")",
":",
"os",
".",
"makedirs",
"(",
"object_dir",
")",
"data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"object_dir",
",",
"DATA_DIRECTORY",
")",
"os",
".",
"mkdir",
"(",
"data_dir",
")",
"func_data_file",
"=",
"prop_name",
"uploaded_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"prop_name",
")",
"shutil",
".",
"copyfile",
"(",
"filename",
",",
"uploaded_file",
")",
"# Create the initial set of properties for the new image object.",
"properties",
"=",
"{",
"datastore",
".",
"PROPERTY_NAME",
":",
"prop_name",
",",
"datastore",
".",
"PROPERTY_FILENAME",
":",
"prop_name",
",",
"datastore",
".",
"PROPERTY_FILESIZE",
":",
"os",
".",
"path",
".",
"getsize",
"(",
"uploaded_file",
")",
",",
"datastore",
".",
"PROPERTY_MIMETYPE",
":",
"prop_mime",
",",
"datastore",
".",
"PROPERTY_FUNCDATAFILE",
":",
"func_data_file",
"}",
"if",
"read_only",
":",
"properties",
"[",
"datastore",
".",
"PROPERTY_READONLY",
"]",
"=",
"True",
"# Create object handle and store it in database before returning it",
"obj",
"=",
"FunctionalDataHandle",
"(",
"identifier",
",",
"properties",
",",
"object_dir",
")",
"self",
".",
"insert_object",
"(",
"obj",
")",
"return",
"obj"
] |
Create a functional data object for the given file. Expects the file
to be a valid functional data file. Expects exactly one file that has
suffix mgh/mgz or nii/nii.gz.
Parameters
----------
filename : string
Name of the (uploaded) file
read_only : boolean, optional
Optional value for the read-only property
Returns
-------
FunctionalDataHandle
Handle for created functional data object in database
|
[
"Create",
"a",
"functional",
"data",
"object",
"for",
"the",
"given",
"file",
".",
"Expects",
"the",
"file",
"to",
"be",
"a",
"valid",
"functional",
"data",
"file",
".",
"Expects",
"exactly",
"one",
"file",
"that",
"has",
"suffix",
"mgh",
"/",
"mgz",
"or",
"nii",
"/",
"nii",
".",
"gz",
"."
] |
python
|
train
|
lrq3000/pyFileFixity
|
pyFileFixity/lib/pathlib2.py
|
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/pathlib2.py#L595-L602
|
def select_from(self, parent_path):
"""Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself."""
path_cls = type(parent_path)
is_dir = path_cls.is_dir
exists = path_cls.exists
listdir = parent_path._accessor.listdir
return self._select_from(parent_path, is_dir, exists, listdir)
|
[
"def",
"select_from",
"(",
"self",
",",
"parent_path",
")",
":",
"path_cls",
"=",
"type",
"(",
"parent_path",
")",
"is_dir",
"=",
"path_cls",
".",
"is_dir",
"exists",
"=",
"path_cls",
".",
"exists",
"listdir",
"=",
"parent_path",
".",
"_accessor",
".",
"listdir",
"return",
"self",
".",
"_select_from",
"(",
"parent_path",
",",
"is_dir",
",",
"exists",
",",
"listdir",
")"
] |
Iterate over all child paths of `parent_path` matched by this
selector. This can contain parent_path itself.
|
[
"Iterate",
"over",
"all",
"child",
"paths",
"of",
"parent_path",
"matched",
"by",
"this",
"selector",
".",
"This",
"can",
"contain",
"parent_path",
"itself",
"."
] |
python
|
train
|
IdentityPython/pysaml2
|
src/saml2/mdstore.py
|
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mdstore.py#L548-L585
|
def service(self, entity_id, typ, service, binding=None):
""" Get me all services with a specified
entity ID and type, that supports the specified version of binding.
:param entity_id: The EntityId
:param typ: Type of service (idp, attribute_authority, ...)
:param service: which service that is sought for
:param binding: A binding identifier
:return: list of service descriptions.
Or if no binding was specified a list of 2-tuples (binding, srv)
"""
try:
srvs = []
for t in self[entity_id][typ]:
try:
srvs.extend(t[service])
except KeyError:
pass
except KeyError:
return None
if not srvs:
return srvs
if binding:
res = []
for srv in srvs:
if srv["binding"] == binding:
res.append(srv)
else:
res = {}
for srv in srvs:
try:
res[srv["binding"]].append(srv)
except KeyError:
res[srv["binding"]] = [srv]
logger.debug("service => %s", res)
return res
|
[
"def",
"service",
"(",
"self",
",",
"entity_id",
",",
"typ",
",",
"service",
",",
"binding",
"=",
"None",
")",
":",
"try",
":",
"srvs",
"=",
"[",
"]",
"for",
"t",
"in",
"self",
"[",
"entity_id",
"]",
"[",
"typ",
"]",
":",
"try",
":",
"srvs",
".",
"extend",
"(",
"t",
"[",
"service",
"]",
")",
"except",
"KeyError",
":",
"pass",
"except",
"KeyError",
":",
"return",
"None",
"if",
"not",
"srvs",
":",
"return",
"srvs",
"if",
"binding",
":",
"res",
"=",
"[",
"]",
"for",
"srv",
"in",
"srvs",
":",
"if",
"srv",
"[",
"\"binding\"",
"]",
"==",
"binding",
":",
"res",
".",
"append",
"(",
"srv",
")",
"else",
":",
"res",
"=",
"{",
"}",
"for",
"srv",
"in",
"srvs",
":",
"try",
":",
"res",
"[",
"srv",
"[",
"\"binding\"",
"]",
"]",
".",
"append",
"(",
"srv",
")",
"except",
"KeyError",
":",
"res",
"[",
"srv",
"[",
"\"binding\"",
"]",
"]",
"=",
"[",
"srv",
"]",
"logger",
".",
"debug",
"(",
"\"service => %s\"",
",",
"res",
")",
"return",
"res"
] |
Get me all services with a specified
entity ID and type, that supports the specified version of binding.
:param entity_id: The EntityId
:param typ: Type of service (idp, attribute_authority, ...)
:param service: which service that is sought for
:param binding: A binding identifier
:return: list of service descriptions.
Or if no binding was specified a list of 2-tuples (binding, srv)
|
[
"Get",
"me",
"all",
"services",
"with",
"a",
"specified",
"entity",
"ID",
"and",
"type",
"that",
"supports",
"the",
"specified",
"version",
"of",
"binding",
"."
] |
python
|
train
|
alefnula/tea
|
tea/shell/__init__.py
|
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L260-L273
|
def gcopy(pattern, destination):
"""Copy all file found by glob.glob(pattern) to destination directory.
Args:
pattern (str): Glob pattern
destination (str): Path to the destination directory.
Returns:
bool: True if the operation is successful, False otherwise.
"""
for item in glob.glob(pattern):
if not copy(item, destination):
return False
return True
|
[
"def",
"gcopy",
"(",
"pattern",
",",
"destination",
")",
":",
"for",
"item",
"in",
"glob",
".",
"glob",
"(",
"pattern",
")",
":",
"if",
"not",
"copy",
"(",
"item",
",",
"destination",
")",
":",
"return",
"False",
"return",
"True"
] |
Copy all file found by glob.glob(pattern) to destination directory.
Args:
pattern (str): Glob pattern
destination (str): Path to the destination directory.
Returns:
bool: True if the operation is successful, False otherwise.
|
[
"Copy",
"all",
"file",
"found",
"by",
"glob",
".",
"glob",
"(",
"pattern",
")",
"to",
"destination",
"directory",
"."
] |
python
|
train
|
Koed00/django-q
|
django_q/tasks.py
|
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L561-L568
|
def current(self):
"""
get the index of the currently executing chain element
:return int: current chain index
"""
if not self.started:
return None
return count_group(self.group, cached=self.cached)
|
[
"def",
"current",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"started",
":",
"return",
"None",
"return",
"count_group",
"(",
"self",
".",
"group",
",",
"cached",
"=",
"self",
".",
"cached",
")"
] |
get the index of the currently executing chain element
:return int: current chain index
|
[
"get",
"the",
"index",
"of",
"the",
"currently",
"executing",
"chain",
"element",
":",
"return",
"int",
":",
"current",
"chain",
"index"
] |
python
|
train
|
dropbox/stone
|
stone/ir/data_types.py
|
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/ir/data_types.py#L1370-L1384
|
def set_attributes(self, doc, fields, # pylint: disable=arguments-differ
parent_type=None, catch_all_field=None):
"""
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions.
"""
if parent_type:
assert isinstance(parent_type, Union)
super(Union, self).set_attributes(doc, fields, parent_type)
self.catch_all_field = catch_all_field
self.parent_type = parent_type
|
[
"def",
"set_attributes",
"(",
"self",
",",
"doc",
",",
"fields",
",",
"# pylint: disable=arguments-differ",
"parent_type",
"=",
"None",
",",
"catch_all_field",
"=",
"None",
")",
":",
"if",
"parent_type",
":",
"assert",
"isinstance",
"(",
"parent_type",
",",
"Union",
")",
"super",
"(",
"Union",
",",
"self",
")",
".",
"set_attributes",
"(",
"doc",
",",
"fields",
",",
"parent_type",
")",
"self",
".",
"catch_all_field",
"=",
"catch_all_field",
"self",
".",
"parent_type",
"=",
"parent_type"
] |
:param UnionField catch_all_field: The field designated as the
catch-all. This field should be a member of the list of fields.
See :meth:`Composite.set_attributes` for parameter definitions.
|
[
":",
"param",
"UnionField",
"catch_all_field",
":",
"The",
"field",
"designated",
"as",
"the",
"catch",
"-",
"all",
".",
"This",
"field",
"should",
"be",
"a",
"member",
"of",
"the",
"list",
"of",
"fields",
"."
] |
python
|
train
|
BoboTiG/python-mss
|
mss/linux.py
|
https://github.com/BoboTiG/python-mss/blob/56347f781edb38a0e7a5104080bd683f49c6f074/mss/linux.py#L399-L443
|
def grab(self, monitor):
# type: (Monitor) -> ScreenShot
""" Retrieve all pixels from a monitor. Pixels have to be RGB. """
# Convert PIL bbox style
if isinstance(monitor, tuple):
monitor = {
"left": monitor[0],
"top": monitor[1],
"width": monitor[2] - monitor[0],
"height": monitor[3] - monitor[1],
}
ximage = self.xlib.XGetImage(
MSS.display,
self.drawable,
monitor["left"],
monitor["top"],
monitor["width"],
monitor["height"],
PLAINMASK,
ZPIXMAP,
)
try:
bits_per_pixel = ximage.contents.bits_per_pixel
if bits_per_pixel != 32:
raise ScreenShotError(
"[XImage] bits per pixel value not (yet?) implemented: {}.".format(
bits_per_pixel
)
)
raw_data = ctypes.cast(
ximage.contents.data,
ctypes.POINTER(
ctypes.c_ubyte * monitor["height"] * monitor["width"] * 4
),
)
data = bytearray(raw_data.contents)
finally:
# Free
self.xlib.XDestroyImage(ximage)
return self.cls_image(data, monitor)
|
[
"def",
"grab",
"(",
"self",
",",
"monitor",
")",
":",
"# type: (Monitor) -> ScreenShot",
"# Convert PIL bbox style",
"if",
"isinstance",
"(",
"monitor",
",",
"tuple",
")",
":",
"monitor",
"=",
"{",
"\"left\"",
":",
"monitor",
"[",
"0",
"]",
",",
"\"top\"",
":",
"monitor",
"[",
"1",
"]",
",",
"\"width\"",
":",
"monitor",
"[",
"2",
"]",
"-",
"monitor",
"[",
"0",
"]",
",",
"\"height\"",
":",
"monitor",
"[",
"3",
"]",
"-",
"monitor",
"[",
"1",
"]",
",",
"}",
"ximage",
"=",
"self",
".",
"xlib",
".",
"XGetImage",
"(",
"MSS",
".",
"display",
",",
"self",
".",
"drawable",
",",
"monitor",
"[",
"\"left\"",
"]",
",",
"monitor",
"[",
"\"top\"",
"]",
",",
"monitor",
"[",
"\"width\"",
"]",
",",
"monitor",
"[",
"\"height\"",
"]",
",",
"PLAINMASK",
",",
"ZPIXMAP",
",",
")",
"try",
":",
"bits_per_pixel",
"=",
"ximage",
".",
"contents",
".",
"bits_per_pixel",
"if",
"bits_per_pixel",
"!=",
"32",
":",
"raise",
"ScreenShotError",
"(",
"\"[XImage] bits per pixel value not (yet?) implemented: {}.\"",
".",
"format",
"(",
"bits_per_pixel",
")",
")",
"raw_data",
"=",
"ctypes",
".",
"cast",
"(",
"ximage",
".",
"contents",
".",
"data",
",",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_ubyte",
"*",
"monitor",
"[",
"\"height\"",
"]",
"*",
"monitor",
"[",
"\"width\"",
"]",
"*",
"4",
")",
",",
")",
"data",
"=",
"bytearray",
"(",
"raw_data",
".",
"contents",
")",
"finally",
":",
"# Free",
"self",
".",
"xlib",
".",
"XDestroyImage",
"(",
"ximage",
")",
"return",
"self",
".",
"cls_image",
"(",
"data",
",",
"monitor",
")"
] |
Retrieve all pixels from a monitor. Pixels have to be RGB.
|
[
"Retrieve",
"all",
"pixels",
"from",
"a",
"monitor",
".",
"Pixels",
"have",
"to",
"be",
"RGB",
"."
] |
python
|
train
|
andymccurdy/redis-py
|
redis/client.py
|
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L2597-L2603
|
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
|
[
"def",
"zunionstore",
"(",
"self",
",",
"dest",
",",
"keys",
",",
"aggregate",
"=",
"None",
")",
":",
"return",
"self",
".",
"_zaggregate",
"(",
"'ZUNIONSTORE'",
",",
"dest",
",",
"keys",
",",
"aggregate",
")"
] |
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
|
[
"Union",
"multiple",
"sorted",
"sets",
"specified",
"by",
"keys",
"into",
"a",
"new",
"sorted",
"set",
"dest",
".",
"Scores",
"in",
"the",
"destination",
"will",
"be",
"aggregated",
"based",
"on",
"the",
"aggregate",
"or",
"SUM",
"if",
"none",
"is",
"provided",
"."
] |
python
|
train
|
sanand0/xmljson
|
xmljson/__init__.py
|
https://github.com/sanand0/xmljson/blob/2ecc2065fe7c87b3d282d362289927f13ce7f8b0/xmljson/__init__.py#L149-L173
|
def data(self, root):
'''Convert etree.Element into a dictionary'''
value = self.dict()
children = [node for node in root if isinstance(node.tag, basestring)]
for attr, attrval in root.attrib.items():
attr = attr if self.attr_prefix is None else self.attr_prefix + attr
value[attr] = self._fromstring(attrval)
if root.text and self.text_content is not None:
text = root.text.strip()
if text:
if self.simple_text and len(children) == len(root.attrib) == 0:
value = self._fromstring(text)
else:
value[self.text_content] = self._fromstring(text)
count = Counter(child.tag for child in children)
for child in children:
if count[child.tag] == 1:
value.update(self.data(child))
else:
result = value.setdefault(child.tag, self.list())
result += self.data(child).values()
# if simple_text, elements with no children nor attrs become '', not {}
if isinstance(value, dict) and not value and self.simple_text:
value = ''
return self.dict([(root.tag, value)])
|
[
"def",
"data",
"(",
"self",
",",
"root",
")",
":",
"value",
"=",
"self",
".",
"dict",
"(",
")",
"children",
"=",
"[",
"node",
"for",
"node",
"in",
"root",
"if",
"isinstance",
"(",
"node",
".",
"tag",
",",
"basestring",
")",
"]",
"for",
"attr",
",",
"attrval",
"in",
"root",
".",
"attrib",
".",
"items",
"(",
")",
":",
"attr",
"=",
"attr",
"if",
"self",
".",
"attr_prefix",
"is",
"None",
"else",
"self",
".",
"attr_prefix",
"+",
"attr",
"value",
"[",
"attr",
"]",
"=",
"self",
".",
"_fromstring",
"(",
"attrval",
")",
"if",
"root",
".",
"text",
"and",
"self",
".",
"text_content",
"is",
"not",
"None",
":",
"text",
"=",
"root",
".",
"text",
".",
"strip",
"(",
")",
"if",
"text",
":",
"if",
"self",
".",
"simple_text",
"and",
"len",
"(",
"children",
")",
"==",
"len",
"(",
"root",
".",
"attrib",
")",
"==",
"0",
":",
"value",
"=",
"self",
".",
"_fromstring",
"(",
"text",
")",
"else",
":",
"value",
"[",
"self",
".",
"text_content",
"]",
"=",
"self",
".",
"_fromstring",
"(",
"text",
")",
"count",
"=",
"Counter",
"(",
"child",
".",
"tag",
"for",
"child",
"in",
"children",
")",
"for",
"child",
"in",
"children",
":",
"if",
"count",
"[",
"child",
".",
"tag",
"]",
"==",
"1",
":",
"value",
".",
"update",
"(",
"self",
".",
"data",
"(",
"child",
")",
")",
"else",
":",
"result",
"=",
"value",
".",
"setdefault",
"(",
"child",
".",
"tag",
",",
"self",
".",
"list",
"(",
")",
")",
"result",
"+=",
"self",
".",
"data",
"(",
"child",
")",
".",
"values",
"(",
")",
"# if simple_text, elements with no children nor attrs become '', not {}",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
"and",
"not",
"value",
"and",
"self",
".",
"simple_text",
":",
"value",
"=",
"''",
"return",
"self",
".",
"dict",
"(",
"[",
"(",
"root",
".",
"tag",
",",
"value",
")",
"]",
")"
] |
Convert etree.Element into a dictionary
|
[
"Convert",
"etree",
".",
"Element",
"into",
"a",
"dictionary"
] |
python
|
train
|
gamechanger/schemer
|
schemer/validators.py
|
https://github.com/gamechanger/schemer/blob/1d1dd7da433d3b84ce5a80ded5a84ab4a65825ee/schemer/validators.py#L24-L32
|
def gte(min_value):
"""
Validates that a field value is greater than or equal to the
value given to this validator.
"""
def validate(value):
if value < min_value:
return e("{} is not greater than or equal to {}", value, min_value)
return validate
|
[
"def",
"gte",
"(",
"min_value",
")",
":",
"def",
"validate",
"(",
"value",
")",
":",
"if",
"value",
"<",
"min_value",
":",
"return",
"e",
"(",
"\"{} is not greater than or equal to {}\"",
",",
"value",
",",
"min_value",
")",
"return",
"validate"
] |
Validates that a field value is greater than or equal to the
value given to this validator.
|
[
"Validates",
"that",
"a",
"field",
"value",
"is",
"greater",
"than",
"or",
"equal",
"to",
"the",
"value",
"given",
"to",
"this",
"validator",
"."
] |
python
|
train
|
aouyar/PyMunin
|
pysysinfo/postgresql.py
|
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/postgresql.py#L114-L124
|
def _simpleQuery(self, query):
"""Executes simple query which returns a single column.
@param query: Query string.
@return: Query result string.
"""
cur = self._conn.cursor()
cur.execute(query)
row = cur.fetchone()
return util.parse_value(row[0])
|
[
"def",
"_simpleQuery",
"(",
"self",
",",
"query",
")",
":",
"cur",
"=",
"self",
".",
"_conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"query",
")",
"row",
"=",
"cur",
".",
"fetchone",
"(",
")",
"return",
"util",
".",
"parse_value",
"(",
"row",
"[",
"0",
"]",
")"
] |
Executes simple query which returns a single column.
@param query: Query string.
@return: Query result string.
|
[
"Executes",
"simple",
"query",
"which",
"returns",
"a",
"single",
"column",
"."
] |
python
|
train
|
mardix/pylot
|
pylot/utils.py
|
https://github.com/mardix/pylot/blob/506a33a56ebdfc0925b94015e8cf98ccb16a143c/pylot/utils.py#L20-L24
|
def get_base_dir():
"""
Return the base directory
"""
return os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
|
[
"def",
"get_base_dir",
"(",
")",
":",
"return",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
")",
"[",
"0",
"]"
] |
Return the base directory
|
[
"Return",
"the",
"base",
"directory"
] |
python
|
train
|
rbit/pydtls
|
dtls/sslconnection.py
|
https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/sslconnection.py#L778-L811
|
def shutdown(self):
"""Shut down the DTLS connection
This method attemps to complete a bidirectional shutdown between
peers. For non-blocking sockets, it should be called repeatedly until
it no longer raises continuation request exceptions.
"""
if hasattr(self, "_listening"):
# Listening server-side sockets cannot be shut down
return
try:
self._wrap_socket_library_call(
lambda: SSL_shutdown(self._ssl.value), ERR_READ_TIMEOUT)
except openssl_error() as err:
if err.result == 0:
# close-notify alert was just sent; wait for same from peer
# Note: while it might seem wise to suppress further read-aheads
# with SSL_set_read_ahead here, doing so causes a shutdown
# failure (ret: -1, SSL_ERROR_SYSCALL) on the DTLS shutdown
# initiator side. And test_starttls does pass.
self._wrap_socket_library_call(
lambda: SSL_shutdown(self._ssl.value), ERR_READ_TIMEOUT)
else:
raise
if hasattr(self, "_rsock"):
# Return wrapped connected server socket (non-listening)
return _UnwrappedSocket(self._sock, self._rsock, self._udp_demux,
self._ctx,
BIO_dgram_get_peer(self._wbio.value))
# Return unwrapped client-side socket or unwrapped server-side socket
# for single-socket servers
return self._sock
|
[
"def",
"shutdown",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"_listening\"",
")",
":",
"# Listening server-side sockets cannot be shut down",
"return",
"try",
":",
"self",
".",
"_wrap_socket_library_call",
"(",
"lambda",
":",
"SSL_shutdown",
"(",
"self",
".",
"_ssl",
".",
"value",
")",
",",
"ERR_READ_TIMEOUT",
")",
"except",
"openssl_error",
"(",
")",
"as",
"err",
":",
"if",
"err",
".",
"result",
"==",
"0",
":",
"# close-notify alert was just sent; wait for same from peer",
"# Note: while it might seem wise to suppress further read-aheads",
"# with SSL_set_read_ahead here, doing so causes a shutdown",
"# failure (ret: -1, SSL_ERROR_SYSCALL) on the DTLS shutdown",
"# initiator side. And test_starttls does pass.",
"self",
".",
"_wrap_socket_library_call",
"(",
"lambda",
":",
"SSL_shutdown",
"(",
"self",
".",
"_ssl",
".",
"value",
")",
",",
"ERR_READ_TIMEOUT",
")",
"else",
":",
"raise",
"if",
"hasattr",
"(",
"self",
",",
"\"_rsock\"",
")",
":",
"# Return wrapped connected server socket (non-listening)",
"return",
"_UnwrappedSocket",
"(",
"self",
".",
"_sock",
",",
"self",
".",
"_rsock",
",",
"self",
".",
"_udp_demux",
",",
"self",
".",
"_ctx",
",",
"BIO_dgram_get_peer",
"(",
"self",
".",
"_wbio",
".",
"value",
")",
")",
"# Return unwrapped client-side socket or unwrapped server-side socket",
"# for single-socket servers",
"return",
"self",
".",
"_sock"
] |
Shut down the DTLS connection
This method attemps to complete a bidirectional shutdown between
peers. For non-blocking sockets, it should be called repeatedly until
it no longer raises continuation request exceptions.
|
[
"Shut",
"down",
"the",
"DTLS",
"connection"
] |
python
|
train
|
brechtm/rinohtype
|
src/rinoh/text.py
|
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/text.py#L262-L273
|
def y_offset(self, container):
"""Vertical baseline offset (up is positive)."""
offset = (self.parent.y_offset(container)\
if hasattr(self.parent, 'y_offset') else 0)
if self.is_script(container):
style = self._style(container)
offset += (self.parent.height(container) *
self.position[style.position])
# The Y offset should only change once for the nesting level
# where the position style is set, hence we don't recursively
# get the position style using self.get_style('position')
return offset
|
[
"def",
"y_offset",
"(",
"self",
",",
"container",
")",
":",
"offset",
"=",
"(",
"self",
".",
"parent",
".",
"y_offset",
"(",
"container",
")",
"if",
"hasattr",
"(",
"self",
".",
"parent",
",",
"'y_offset'",
")",
"else",
"0",
")",
"if",
"self",
".",
"is_script",
"(",
"container",
")",
":",
"style",
"=",
"self",
".",
"_style",
"(",
"container",
")",
"offset",
"+=",
"(",
"self",
".",
"parent",
".",
"height",
"(",
"container",
")",
"*",
"self",
".",
"position",
"[",
"style",
".",
"position",
"]",
")",
"# The Y offset should only change once for the nesting level",
"# where the position style is set, hence we don't recursively",
"# get the position style using self.get_style('position')",
"return",
"offset"
] |
Vertical baseline offset (up is positive).
|
[
"Vertical",
"baseline",
"offset",
"(",
"up",
"is",
"positive",
")",
"."
] |
python
|
train
|
rochacbruno/dynaconf
|
dynaconf/contrib/flask_dynaconf.py
|
https://github.com/rochacbruno/dynaconf/blob/5a7cc8f8252251cbdf4f4112965801f9dfe2831d/dynaconf/contrib/flask_dynaconf.py#L102-L107
|
def init_app(self, app, **kwargs):
"""kwargs holds initial dynaconf configuration"""
self.kwargs.update(kwargs)
self.settings = self.dynaconf_instance or LazySettings(**self.kwargs)
app.config = self.make_config(app)
app.dynaconf = self.settings
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"kwargs",
".",
"update",
"(",
"kwargs",
")",
"self",
".",
"settings",
"=",
"self",
".",
"dynaconf_instance",
"or",
"LazySettings",
"(",
"*",
"*",
"self",
".",
"kwargs",
")",
"app",
".",
"config",
"=",
"self",
".",
"make_config",
"(",
"app",
")",
"app",
".",
"dynaconf",
"=",
"self",
".",
"settings"
] |
kwargs holds initial dynaconf configuration
|
[
"kwargs",
"holds",
"initial",
"dynaconf",
"configuration"
] |
python
|
train
|
tomplus/kubernetes_asyncio
|
kubernetes_asyncio/client/api/version_api.py
|
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/version_api.py#L35-L54
|
def get_code(self, **kwargs): # noqa: E501
"""get_code # noqa: E501
get the code version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_code(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: VersionInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_code_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_code_with_http_info(**kwargs) # noqa: E501
return data
|
[
"def",
"get_code",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"get_code_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_code_with_http_info",
"(",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] |
get_code # noqa: E501
get the code version # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_code(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: VersionInfo
If the method is called asynchronously,
returns the request thread.
|
[
"get_code",
"#",
"noqa",
":",
"E501"
] |
python
|
train
|
jmbeach/KEP.py
|
src/keppy/channel.py
|
https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/channel.py#L19-L24
|
def parse_devices(self):
"""Creates an array of Device objects from the channel"""
devices = []
for device in self._channel_dict["devices"]:
devices.append(Device(device, self._is_sixteen_bit, self._ignore_list))
return devices
|
[
"def",
"parse_devices",
"(",
"self",
")",
":",
"devices",
"=",
"[",
"]",
"for",
"device",
"in",
"self",
".",
"_channel_dict",
"[",
"\"devices\"",
"]",
":",
"devices",
".",
"append",
"(",
"Device",
"(",
"device",
",",
"self",
".",
"_is_sixteen_bit",
",",
"self",
".",
"_ignore_list",
")",
")",
"return",
"devices"
] |
Creates an array of Device objects from the channel
|
[
"Creates",
"an",
"array",
"of",
"Device",
"objects",
"from",
"the",
"channel"
] |
python
|
train
|
carpedm20/fbchat
|
fbchat/_client.py
|
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L1343-L1359
|
def send(self, message, thread_id=None, thread_type=ThreadType.USER):
"""
Sends a message to a thread
:param message: Message to send
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type message: models.Message
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent message
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, thread_type)
data = self._getSendData(
message=message, thread_id=thread_id, thread_type=thread_type
)
return self._doSendRequest(data)
|
[
"def",
"send",
"(",
"self",
",",
"message",
",",
"thread_id",
"=",
"None",
",",
"thread_type",
"=",
"ThreadType",
".",
"USER",
")",
":",
"thread_id",
",",
"thread_type",
"=",
"self",
".",
"_getThread",
"(",
"thread_id",
",",
"thread_type",
")",
"data",
"=",
"self",
".",
"_getSendData",
"(",
"message",
"=",
"message",
",",
"thread_id",
"=",
"thread_id",
",",
"thread_type",
"=",
"thread_type",
")",
"return",
"self",
".",
"_doSendRequest",
"(",
"data",
")"
] |
Sends a message to a thread
:param message: Message to send
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type message: models.Message
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent message
:raises: FBchatException if request failed
|
[
"Sends",
"a",
"message",
"to",
"a",
"thread"
] |
python
|
train
|
AkihikoITOH/capybara
|
capybara/virtualenv/lib/python2.7/site-packages/flask/app.py
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/app.py#L1463-L1481
|
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
|
[
"def",
"full_dispatch_request",
"(",
"self",
")",
":",
"self",
".",
"try_trigger_before_first_request_functions",
"(",
")",
"try",
":",
"request_started",
".",
"send",
"(",
"self",
")",
"rv",
"=",
"self",
".",
"preprocess_request",
"(",
")",
"if",
"rv",
"is",
"None",
":",
"rv",
"=",
"self",
".",
"dispatch_request",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"rv",
"=",
"self",
".",
"handle_user_exception",
"(",
"e",
")",
"response",
"=",
"self",
".",
"make_response",
"(",
"rv",
")",
"response",
"=",
"self",
".",
"process_response",
"(",
"response",
")",
"request_finished",
".",
"send",
"(",
"self",
",",
"response",
"=",
"response",
")",
"return",
"response"
] |
Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
|
[
"Dispatches",
"the",
"request",
"and",
"on",
"top",
"of",
"that",
"performs",
"request",
"pre",
"and",
"postprocessing",
"as",
"well",
"as",
"HTTP",
"exception",
"catching",
"and",
"error",
"handling",
"."
] |
python
|
test
|
rosenbrockc/fortpy
|
fortpy/stats/calltree.py
|
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/stats/calltree.py#L49-L64
|
def _call_fan(branch, calls, executable):
"""Appends a list of callees to the branch for each parent
in the call list that calls this executable.
"""
#Since we don't keep track of the specific logic in the executables
#it is possible that we could get a infinite recursion of executables
#that keep calling each other.
if executable in branch:
return
branch.append(executable)
if executable.name in calls:
for caller in calls[executable.name]:
twig = []
_call_fan(twig, calls, caller)
branch
|
[
"def",
"_call_fan",
"(",
"branch",
",",
"calls",
",",
"executable",
")",
":",
"#Since we don't keep track of the specific logic in the executables",
"#it is possible that we could get a infinite recursion of executables",
"#that keep calling each other.",
"if",
"executable",
"in",
"branch",
":",
"return",
"branch",
".",
"append",
"(",
"executable",
")",
"if",
"executable",
".",
"name",
"in",
"calls",
":",
"for",
"caller",
"in",
"calls",
"[",
"executable",
".",
"name",
"]",
":",
"twig",
"=",
"[",
"]",
"_call_fan",
"(",
"twig",
",",
"calls",
",",
"caller",
")",
"branch"
] |
Appends a list of callees to the branch for each parent
in the call list that calls this executable.
|
[
"Appends",
"a",
"list",
"of",
"callees",
"to",
"the",
"branch",
"for",
"each",
"parent",
"in",
"the",
"call",
"list",
"that",
"calls",
"this",
"executable",
"."
] |
python
|
train
|
bunq/sdk_python
|
bunq/sdk/model/generated/object_.py
|
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/object_.py#L3552-L3642
|
def get_referenced_object(self):
"""
:rtype: core.BunqModel
:raise: BunqException
"""
if self._BunqMeTab is not None:
return self._BunqMeTab
if self._BunqMeTabResultResponse is not None:
return self._BunqMeTabResultResponse
if self._BunqMeFundraiserResult is not None:
return self._BunqMeFundraiserResult
if self._Card is not None:
return self._Card
if self._CardDebit is not None:
return self._CardDebit
if self._DraftPayment is not None:
return self._DraftPayment
if self._FeatureAnnouncement is not None:
return self._FeatureAnnouncement
if self._IdealMerchantTransaction is not None:
return self._IdealMerchantTransaction
if self._Invoice is not None:
return self._Invoice
if self._ScheduledPayment is not None:
return self._ScheduledPayment
if self._ScheduledPaymentBatch is not None:
return self._ScheduledPaymentBatch
if self._ScheduledInstance is not None:
return self._ScheduledInstance
if self._MasterCardAction is not None:
return self._MasterCardAction
if self._BankSwitchServiceNetherlandsIncomingPayment is not None:
return self._BankSwitchServiceNetherlandsIncomingPayment
if self._Payment is not None:
return self._Payment
if self._PaymentBatch is not None:
return self._PaymentBatch
if self._RequestInquiryBatch is not None:
return self._RequestInquiryBatch
if self._RequestInquiry is not None:
return self._RequestInquiry
if self._RequestResponse is not None:
return self._RequestResponse
if self._RewardRecipient is not None:
return self._RewardRecipient
if self._RewardSender is not None:
return self._RewardSender
if self._ShareInviteBankInquiryBatch is not None:
return self._ShareInviteBankInquiryBatch
if self._ShareInviteBankInquiry is not None:
return self._ShareInviteBankInquiry
if self._ShareInviteBankResponse is not None:
return self._ShareInviteBankResponse
if self._SofortMerchantTransaction is not None:
return self._SofortMerchantTransaction
if self._TabResultInquiry is not None:
return self._TabResultInquiry
if self._TabResultResponse is not None:
return self._TabResultResponse
if self._TransferwiseTransfer is not None:
return self._TransferwiseTransfer
raise exception.BunqException(self._ERROR_NULL_FIELDS)
|
[
"def",
"get_referenced_object",
"(",
"self",
")",
":",
"if",
"self",
".",
"_BunqMeTab",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_BunqMeTab",
"if",
"self",
".",
"_BunqMeTabResultResponse",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_BunqMeTabResultResponse",
"if",
"self",
".",
"_BunqMeFundraiserResult",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_BunqMeFundraiserResult",
"if",
"self",
".",
"_Card",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_Card",
"if",
"self",
".",
"_CardDebit",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_CardDebit",
"if",
"self",
".",
"_DraftPayment",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_DraftPayment",
"if",
"self",
".",
"_FeatureAnnouncement",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_FeatureAnnouncement",
"if",
"self",
".",
"_IdealMerchantTransaction",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_IdealMerchantTransaction",
"if",
"self",
".",
"_Invoice",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_Invoice",
"if",
"self",
".",
"_ScheduledPayment",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_ScheduledPayment",
"if",
"self",
".",
"_ScheduledPaymentBatch",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_ScheduledPaymentBatch",
"if",
"self",
".",
"_ScheduledInstance",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_ScheduledInstance",
"if",
"self",
".",
"_MasterCardAction",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_MasterCardAction",
"if",
"self",
".",
"_BankSwitchServiceNetherlandsIncomingPayment",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_BankSwitchServiceNetherlandsIncomingPayment",
"if",
"self",
".",
"_Payment",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_Payment",
"if",
"self",
".",
"_PaymentBatch",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_PaymentBatch",
"if",
"self",
".",
"_RequestInquiryBatch",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_RequestInquiryBatch",
"if",
"self",
".",
"_RequestInquiry",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_RequestInquiry",
"if",
"self",
".",
"_RequestResponse",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_RequestResponse",
"if",
"self",
".",
"_RewardRecipient",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_RewardRecipient",
"if",
"self",
".",
"_RewardSender",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_RewardSender",
"if",
"self",
".",
"_ShareInviteBankInquiryBatch",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_ShareInviteBankInquiryBatch",
"if",
"self",
".",
"_ShareInviteBankInquiry",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_ShareInviteBankInquiry",
"if",
"self",
".",
"_ShareInviteBankResponse",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_ShareInviteBankResponse",
"if",
"self",
".",
"_SofortMerchantTransaction",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_SofortMerchantTransaction",
"if",
"self",
".",
"_TabResultInquiry",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_TabResultInquiry",
"if",
"self",
".",
"_TabResultResponse",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_TabResultResponse",
"if",
"self",
".",
"_TransferwiseTransfer",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_TransferwiseTransfer",
"raise",
"exception",
".",
"BunqException",
"(",
"self",
".",
"_ERROR_NULL_FIELDS",
")"
] |
:rtype: core.BunqModel
:raise: BunqException
|
[
":",
"rtype",
":",
"core",
".",
"BunqModel",
":",
"raise",
":",
"BunqException"
] |
python
|
train
|
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_1/gallery/gallery_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/gallery/gallery_client.py#L671-L695
|
def get_asset_from_new_extension_draft(self, publisher_name, draft_id, asset_type, **kwargs):
"""GetAssetFromNewExtensionDraft.
[Preview API]
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:rtype: object
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if asset_type is not None:
route_values['assetType'] = self._serialize.url('asset_type', asset_type, 'str')
response = self._send(http_method='GET',
location_id='88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7',
version='5.1-preview.1',
route_values=route_values,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
|
[
"def",
"get_asset_from_new_extension_draft",
"(",
"self",
",",
"publisher_name",
",",
"draft_id",
",",
"asset_type",
",",
"*",
"*",
"kwargs",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"publisher_name",
"is",
"not",
"None",
":",
"route_values",
"[",
"'publisherName'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'publisher_name'",
",",
"publisher_name",
",",
"'str'",
")",
"if",
"draft_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'draftId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'draft_id'",
",",
"draft_id",
",",
"'str'",
")",
"if",
"asset_type",
"is",
"not",
"None",
":",
"route_values",
"[",
"'assetType'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'asset_type'",
",",
"asset_type",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'88c0b1c8-b4f1-498a-9b2a-8446ef9f32e7'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"accept_media_type",
"=",
"'application/octet-stream'",
")",
"if",
"\"callback\"",
"in",
"kwargs",
":",
"callback",
"=",
"kwargs",
"[",
"\"callback\"",
"]",
"else",
":",
"callback",
"=",
"None",
"return",
"self",
".",
"_client",
".",
"stream_download",
"(",
"response",
",",
"callback",
"=",
"callback",
")"
] |
GetAssetFromNewExtensionDraft.
[Preview API]
:param str publisher_name:
:param str draft_id:
:param str asset_type:
:rtype: object
|
[
"GetAssetFromNewExtensionDraft",
".",
"[",
"Preview",
"API",
"]",
":",
"param",
"str",
"publisher_name",
":",
":",
"param",
"str",
"draft_id",
":",
":",
"param",
"str",
"asset_type",
":",
":",
"rtype",
":",
"object"
] |
python
|
train
|
GibbsConsulting/django-plotly-dash
|
django_plotly_dash/dash_wrapper.py
|
https://github.com/GibbsConsulting/django-plotly-dash/blob/773ed081fc2ea3cc7607590322a14686a7a79bc5/django_plotly_dash/dash_wrapper.py#L187-L209
|
def form_dash_instance(self, replacements=None, ndid=None, base_pathname=None):
'Construct a Dash instance taking into account state'
if ndid is None:
ndid = self._uid
rd = WrappedDash(base_pathname=base_pathname,
expanded_callbacks=self._expanded_callbacks,
replacements=replacements,
ndid=ndid,
serve_locally=self._serve_locally)
rd.layout = self.layout
rd.config['suppress_callback_exceptions'] = self._suppress_callback_exceptions
for cb, func in self._callback_sets:
rd.callback(**cb)(func)
for s in self.css.items:
rd.css.append_css(s)
for s in self.scripts.items:
rd.scripts.append_script(s)
return rd
|
[
"def",
"form_dash_instance",
"(",
"self",
",",
"replacements",
"=",
"None",
",",
"ndid",
"=",
"None",
",",
"base_pathname",
"=",
"None",
")",
":",
"if",
"ndid",
"is",
"None",
":",
"ndid",
"=",
"self",
".",
"_uid",
"rd",
"=",
"WrappedDash",
"(",
"base_pathname",
"=",
"base_pathname",
",",
"expanded_callbacks",
"=",
"self",
".",
"_expanded_callbacks",
",",
"replacements",
"=",
"replacements",
",",
"ndid",
"=",
"ndid",
",",
"serve_locally",
"=",
"self",
".",
"_serve_locally",
")",
"rd",
".",
"layout",
"=",
"self",
".",
"layout",
"rd",
".",
"config",
"[",
"'suppress_callback_exceptions'",
"]",
"=",
"self",
".",
"_suppress_callback_exceptions",
"for",
"cb",
",",
"func",
"in",
"self",
".",
"_callback_sets",
":",
"rd",
".",
"callback",
"(",
"*",
"*",
"cb",
")",
"(",
"func",
")",
"for",
"s",
"in",
"self",
".",
"css",
".",
"items",
":",
"rd",
".",
"css",
".",
"append_css",
"(",
"s",
")",
"for",
"s",
"in",
"self",
".",
"scripts",
".",
"items",
":",
"rd",
".",
"scripts",
".",
"append_script",
"(",
"s",
")",
"return",
"rd"
] |
Construct a Dash instance taking into account state
|
[
"Construct",
"a",
"Dash",
"instance",
"taking",
"into",
"account",
"state"
] |
python
|
train
|
sernst/cauldron
|
cauldron/cli/server/routes/display.py
|
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/server/routes/display.py#L10-L28
|
def view(route: str):
"""
Retrieves the contents of the file specified by the view route if it
exists.
"""
project = cauldron.project.get_internal_project()
results_path = project.results_path if project else None
if not project or not results_path:
return '', 204
path = os.path.join(results_path, route)
if not os.path.exists(path):
return '', 204
return flask.send_file(
path,
mimetype=mimetypes.guess_type(path)[0],
cache_timeout=-1
)
|
[
"def",
"view",
"(",
"route",
":",
"str",
")",
":",
"project",
"=",
"cauldron",
".",
"project",
".",
"get_internal_project",
"(",
")",
"results_path",
"=",
"project",
".",
"results_path",
"if",
"project",
"else",
"None",
"if",
"not",
"project",
"or",
"not",
"results_path",
":",
"return",
"''",
",",
"204",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"results_path",
",",
"route",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"''",
",",
"204",
"return",
"flask",
".",
"send_file",
"(",
"path",
",",
"mimetype",
"=",
"mimetypes",
".",
"guess_type",
"(",
"path",
")",
"[",
"0",
"]",
",",
"cache_timeout",
"=",
"-",
"1",
")"
] |
Retrieves the contents of the file specified by the view route if it
exists.
|
[
"Retrieves",
"the",
"contents",
"of",
"the",
"file",
"specified",
"by",
"the",
"view",
"route",
"if",
"it",
"exists",
"."
] |
python
|
train
|
iotile/coretools
|
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Script/SConsOptions.py
|
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Script/SConsOptions.py#L529-L982
|
def Parser(version):
"""
Returns an options parser object initialized with the standard
SCons options.
"""
formatter = SConsIndentedHelpFormatter(max_help_position=30)
op = SConsOptionParser(option_class=SConsOption,
add_help_option=False,
formatter=formatter,
usage="usage: scons [OPTION] [TARGET] ...",)
op.preserve_unknown_options = True
op.version = version
# Add the options to the parser we just created.
#
# These are in the order we want them to show up in the -H help
# text, basically alphabetical. Each op.add_option() call below
# should have a consistent format:
#
# op.add_option("-L", "--long-option-name",
# nargs=1, type="string",
# dest="long_option_name", default='foo',
# action="callback", callback=opt_long_option,
# help="help text goes here",
# metavar="VAR")
#
# Even though the optparse module constructs reasonable default
# destination names from the long option names, we're going to be
# explicit about each one for easier readability and so this code
# will at least show up when grepping the source for option attribute
# names, or otherwise browsing the source code.
# options ignored for compatibility
def opt_ignore(option, opt, value, parser):
sys.stderr.write("Warning: ignoring %s option\n" % opt)
op.add_option("-b", "-d", "-e", "-m", "-S", "-t", "-w",
"--environment-overrides",
"--no-keep-going",
"--no-print-directory",
"--print-directory",
"--stop",
"--touch",
action="callback", callback=opt_ignore,
help="Ignored for compatibility.")
op.add_option('-c', '--clean', '--remove',
dest="clean", default=False,
action="store_true",
help="Remove specified targets and dependencies.")
op.add_option('-C', '--directory',
nargs=1, type="string",
dest="directory", default=[],
action="append",
help="Change to DIR before doing anything.",
metavar="DIR")
op.add_option('--cache-debug',
nargs=1,
dest="cache_debug", default=None,
action="store",
help="Print CacheDir debug info to FILE.",
metavar="FILE")
op.add_option('--cache-disable', '--no-cache',
dest='cache_disable', default=False,
action="store_true",
help="Do not retrieve built targets from CacheDir.")
op.add_option('--cache-force', '--cache-populate',
dest='cache_force', default=False,
action="store_true",
help="Copy already-built targets into the CacheDir.")
op.add_option('--cache-readonly',
dest='cache_readonly', default=False,
action="store_true",
help="Do not update CacheDir with built targets.")
op.add_option('--cache-show',
dest='cache_show', default=False,
action="store_true",
help="Print build actions for files from CacheDir.")
def opt_invalid(group, value, options):
errmsg = "`%s' is not a valid %s option type, try:\n" % (value, group)
return errmsg + " %s" % ", ".join(options)
config_options = ["auto", "force" ,"cache"]
opt_config_help = "Controls Configure subsystem: %s." \
% ", ".join(config_options)
op.add_option('--config',
nargs=1, choices=config_options,
dest="config", default="auto",
help = opt_config_help,
metavar="MODE")
op.add_option('-D',
dest="climb_up", default=None,
action="store_const", const=2,
help="Search up directory tree for SConstruct, "
"build all Default() targets.")
deprecated_debug_options = {
"dtree" : '; please use --tree=derived instead',
"nomemoizer" : ' and has no effect',
"stree" : '; please use --tree=all,status instead',
"tree" : '; please use --tree=all instead',
}
debug_options = ["count", "duplicate", "explain", "findlibs",
"includes", "memoizer", "memory", "objects",
"pdb", "prepare", "presub", "stacktrace",
"time"]
def opt_debug(option, opt, value__, parser,
debug_options=debug_options,
deprecated_debug_options=deprecated_debug_options):
for value in value__.split(','):
if value in debug_options:
parser.values.debug.append(value)
elif value in list(deprecated_debug_options.keys()):
parser.values.debug.append(value)
try:
parser.values.delayed_warnings
except AttributeError:
parser.values.delayed_warnings = []
msg = deprecated_debug_options[value]
w = "The --debug=%s option is deprecated%s." % (value, msg)
t = (SCons.Warnings.DeprecatedDebugOptionsWarning, w)
parser.values.delayed_warnings.append(t)
else:
raise OptionValueError(opt_invalid('debug', value, debug_options))
opt_debug_help = "Print various types of debugging information: %s." \
% ", ".join(debug_options)
op.add_option('--debug',
nargs=1, type="string",
dest="debug", default=[],
action="callback", callback=opt_debug,
help=opt_debug_help,
metavar="TYPE")
def opt_diskcheck(option, opt, value, parser):
try:
diskcheck_value = diskcheck_convert(value)
except ValueError as e:
raise OptionValueError("`%s' is not a valid diskcheck type" % e)
setattr(parser.values, option.dest, diskcheck_value)
op.add_option('--diskcheck',
nargs=1, type="string",
dest='diskcheck', default=None,
action="callback", callback=opt_diskcheck,
help="Enable specific on-disk checks.",
metavar="TYPE")
def opt_duplicate(option, opt, value, parser):
if not value in SCons.Node.FS.Valid_Duplicates:
raise OptionValueError(opt_invalid('duplication', value,
SCons.Node.FS.Valid_Duplicates))
setattr(parser.values, option.dest, value)
# Set the duplicate style right away so it can affect linking
# of SConscript files.
SCons.Node.FS.set_duplicate(value)
opt_duplicate_help = "Set the preferred duplication methods. Must be one of " \
+ ", ".join(SCons.Node.FS.Valid_Duplicates)
op.add_option('--duplicate',
nargs=1, type="string",
dest="duplicate", default='hard-soft-copy',
action="callback", callback=opt_duplicate,
help=opt_duplicate_help)
op.add_option('-f', '--file', '--makefile', '--sconstruct',
nargs=1, type="string",
dest="file", default=[],
action="append",
help="Read FILE as the top-level SConstruct file.")
op.add_option('-h', '--help',
dest="help", default=False,
action="store_true",
help="Print defined help message, or this one.")
op.add_option("-H", "--help-options",
action="help",
help="Print this message and exit.")
op.add_option('-i', '--ignore-errors',
dest='ignore_errors', default=False,
action="store_true",
help="Ignore errors from build actions.")
op.add_option('-I', '--include-dir',
nargs=1,
dest='include_dir', default=[],
action="append",
help="Search DIR for imported Python modules.",
metavar="DIR")
op.add_option('--implicit-cache',
dest='implicit_cache', default=False,
action="store_true",
help="Cache implicit dependencies")
def opt_implicit_deps(option, opt, value, parser):
setattr(parser.values, 'implicit_cache', True)
setattr(parser.values, option.dest, True)
op.add_option('--implicit-deps-changed',
dest="implicit_deps_changed", default=False,
action="callback", callback=opt_implicit_deps,
help="Ignore cached implicit dependencies.")
op.add_option('--implicit-deps-unchanged',
dest="implicit_deps_unchanged", default=False,
action="callback", callback=opt_implicit_deps,
help="Ignore changes in implicit dependencies.")
op.add_option('--interact', '--interactive',
dest='interactive', default=False,
action="store_true",
help="Run in interactive mode.")
op.add_option('-j', '--jobs',
nargs=1, type="int",
dest="num_jobs", default=1,
action="store",
help="Allow N jobs at once.",
metavar="N")
op.add_option('-k', '--keep-going',
dest='keep_going', default=False,
action="store_true",
help="Keep going when a target can't be made.")
op.add_option('--max-drift',
nargs=1, type="int",
dest='max_drift', default=SCons.Node.FS.default_max_drift,
action="store",
help="Set maximum system clock drift to N seconds.",
metavar="N")
op.add_option('--md5-chunksize',
nargs=1, type="int",
dest='md5_chunksize', default=SCons.Node.FS.File.md5_chunksize,
action="store",
help="Set chunk-size for MD5 signature computation to N kilobytes.",
metavar="N")
op.add_option('-n', '--no-exec', '--just-print', '--dry-run', '--recon',
dest='no_exec', default=False,
action="store_true",
help="Don't build; just print commands.")
op.add_option('--no-site-dir',
dest='no_site_dir', default=False,
action="store_true",
help="Don't search or use the usual site_scons dir.")
op.add_option('--profile',
nargs=1,
dest="profile_file", default=None,
action="store",
help="Profile SCons and put results in FILE.",
metavar="FILE")
op.add_option('-q', '--question',
dest="question", default=False,
action="store_true",
help="Don't build; exit status says if up to date.")
op.add_option('-Q',
dest='no_progress', default=False,
action="store_true",
help="Suppress \"Reading/Building\" progress messages.")
op.add_option('--random',
dest="random", default=False,
action="store_true",
help="Build dependencies in random order.")
op.add_option('-s', '--silent', '--quiet',
dest="silent", default=False,
action="store_true",
help="Don't print commands.")
op.add_option('--site-dir',
nargs=1,
dest='site_dir', default=None,
action="store",
help="Use DIR instead of the usual site_scons dir.",
metavar="DIR")
op.add_option('--stack-size',
nargs=1, type="int",
dest='stack_size',
action="store",
help="Set the stack size of the threads used to run jobs to N kilobytes.",
metavar="N")
op.add_option('--taskmastertrace',
nargs=1,
dest="taskmastertrace_file", default=None,
action="store",
help="Trace Node evaluation to FILE.",
metavar="FILE")
tree_options = ["all", "derived", "prune", "status"]
def opt_tree(option, opt, value, parser, tree_options=tree_options):
from . import Main
tp = Main.TreePrinter()
for o in value.split(','):
if o == 'all':
tp.derived = False
elif o == 'derived':
tp.derived = True
elif o == 'prune':
tp.prune = True
elif o == 'status':
tp.status = True
else:
raise OptionValueError(opt_invalid('--tree', o, tree_options))
parser.values.tree_printers.append(tp)
opt_tree_help = "Print a dependency tree in various formats: %s." \
% ", ".join(tree_options)
op.add_option('--tree',
nargs=1, type="string",
dest="tree_printers", default=[],
action="callback", callback=opt_tree,
help=opt_tree_help,
metavar="OPTIONS")
op.add_option('-u', '--up', '--search-up',
dest="climb_up", default=0,
action="store_const", const=1,
help="Search up directory tree for SConstruct, "
"build targets at or below current directory.")
op.add_option('-U',
dest="climb_up", default=0,
action="store_const", const=3,
help="Search up directory tree for SConstruct, "
"build Default() targets from local SConscript.")
def opt_version(option, opt, value, parser):
sys.stdout.write(parser.version + '\n')
sys.exit(0)
op.add_option("-v", "--version",
action="callback", callback=opt_version,
help="Print the SCons version number and exit.")
def opt_warn(option, opt, value, parser, tree_options=tree_options):
if SCons.Util.is_String(value):
value = value.split(',')
parser.values.warn.extend(value)
op.add_option('--warn', '--warning',
nargs=1, type="string",
dest="warn", default=[],
action="callback", callback=opt_warn,
help="Enable or disable warnings.",
metavar="WARNING-SPEC")
op.add_option('-Y', '--repository', '--srcdir',
nargs=1,
dest="repository", default=[],
action="append",
help="Search REPOSITORY for source and target files.")
# Options from Make and Cons classic that we do not yet support,
# but which we may support someday and whose (potential) meanings
# we don't want to change. These all get a "the -X option is not
# yet implemented" message and don't show up in the help output.
def opt_not_yet(option, opt, value, parser):
msg = "Warning: the %s option is not yet implemented\n" % opt
sys.stderr.write(msg)
op.add_option('-l', '--load-average', '--max-load',
nargs=1, type="float",
dest="load_average", default=0,
action="callback", callback=opt_not_yet,
# action="store",
# help="Don't start multiple jobs unless load is below "
# "LOAD-AVERAGE."
help=SUPPRESS_HELP)
op.add_option('--list-actions',
dest="list_actions",
action="callback", callback=opt_not_yet,
# help="Don't build; list files and build actions."
help=SUPPRESS_HELP)
op.add_option('--list-derived',
dest="list_derived",
action="callback", callback=opt_not_yet,
# help="Don't build; list files that would be built."
help=SUPPRESS_HELP)
op.add_option('--list-where',
dest="list_where",
action="callback", callback=opt_not_yet,
# help="Don't build; list files and where defined."
help=SUPPRESS_HELP)
op.add_option('-o', '--old-file', '--assume-old',
nargs=1, type="string",
dest="old_file", default=[],
action="callback", callback=opt_not_yet,
# action="append",
# help = "Consider FILE to be old; don't rebuild it."
help=SUPPRESS_HELP)
op.add_option('--override',
nargs=1, type="string",
action="callback", callback=opt_not_yet,
dest="override",
# help="Override variables as specified in FILE."
help=SUPPRESS_HELP)
op.add_option('-p',
action="callback", callback=opt_not_yet,
dest="p",
# help="Print internal environments/objects."
help=SUPPRESS_HELP)
op.add_option('-r', '-R', '--no-builtin-rules', '--no-builtin-variables',
action="callback", callback=opt_not_yet,
dest="no_builtin_rules",
# help="Clear default environments and variables."
help=SUPPRESS_HELP)
op.add_option('--write-filenames',
nargs=1, type="string",
dest="write_filenames",
action="callback", callback=opt_not_yet,
# help="Write all filenames examined into FILE."
help=SUPPRESS_HELP)
op.add_option('-W', '--new-file', '--assume-new', '--what-if',
nargs=1, type="string",
dest="new_file",
action="callback", callback=opt_not_yet,
# help="Consider FILE to be changed."
help=SUPPRESS_HELP)
op.add_option('--warn-undefined-variables',
dest="warn_undefined_variables",
action="callback", callback=opt_not_yet,
# help="Warn when an undefined variable is referenced."
help=SUPPRESS_HELP)
return op
|
[
"def",
"Parser",
"(",
"version",
")",
":",
"formatter",
"=",
"SConsIndentedHelpFormatter",
"(",
"max_help_position",
"=",
"30",
")",
"op",
"=",
"SConsOptionParser",
"(",
"option_class",
"=",
"SConsOption",
",",
"add_help_option",
"=",
"False",
",",
"formatter",
"=",
"formatter",
",",
"usage",
"=",
"\"usage: scons [OPTION] [TARGET] ...\"",
",",
")",
"op",
".",
"preserve_unknown_options",
"=",
"True",
"op",
".",
"version",
"=",
"version",
"# Add the options to the parser we just created.",
"#",
"# These are in the order we want them to show up in the -H help",
"# text, basically alphabetical. Each op.add_option() call below",
"# should have a consistent format:",
"#",
"# op.add_option(\"-L\", \"--long-option-name\",",
"# nargs=1, type=\"string\",",
"# dest=\"long_option_name\", default='foo',",
"# action=\"callback\", callback=opt_long_option,",
"# help=\"help text goes here\",",
"# metavar=\"VAR\")",
"#",
"# Even though the optparse module constructs reasonable default",
"# destination names from the long option names, we're going to be",
"# explicit about each one for easier readability and so this code",
"# will at least show up when grepping the source for option attribute",
"# names, or otherwise browsing the source code.",
"# options ignored for compatibility",
"def",
"opt_ignore",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Warning: ignoring %s option\\n\"",
"%",
"opt",
")",
"op",
".",
"add_option",
"(",
"\"-b\"",
",",
"\"-d\"",
",",
"\"-e\"",
",",
"\"-m\"",
",",
"\"-S\"",
",",
"\"-t\"",
",",
"\"-w\"",
",",
"\"--environment-overrides\"",
",",
"\"--no-keep-going\"",
",",
"\"--no-print-directory\"",
",",
"\"--print-directory\"",
",",
"\"--stop\"",
",",
"\"--touch\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_ignore",
",",
"help",
"=",
"\"Ignored for compatibility.\"",
")",
"op",
".",
"add_option",
"(",
"'-c'",
",",
"'--clean'",
",",
"'--remove'",
",",
"dest",
"=",
"\"clean\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Remove specified targets and dependencies.\"",
")",
"op",
".",
"add_option",
"(",
"'-C'",
",",
"'--directory'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"directory\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"append\"",
",",
"help",
"=",
"\"Change to DIR before doing anything.\"",
",",
"metavar",
"=",
"\"DIR\"",
")",
"op",
".",
"add_option",
"(",
"'--cache-debug'",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"\"cache_debug\"",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Print CacheDir debug info to FILE.\"",
",",
"metavar",
"=",
"\"FILE\"",
")",
"op",
".",
"add_option",
"(",
"'--cache-disable'",
",",
"'--no-cache'",
",",
"dest",
"=",
"'cache_disable'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Do not retrieve built targets from CacheDir.\"",
")",
"op",
".",
"add_option",
"(",
"'--cache-force'",
",",
"'--cache-populate'",
",",
"dest",
"=",
"'cache_force'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Copy already-built targets into the CacheDir.\"",
")",
"op",
".",
"add_option",
"(",
"'--cache-readonly'",
",",
"dest",
"=",
"'cache_readonly'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Do not update CacheDir with built targets.\"",
")",
"op",
".",
"add_option",
"(",
"'--cache-show'",
",",
"dest",
"=",
"'cache_show'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Print build actions for files from CacheDir.\"",
")",
"def",
"opt_invalid",
"(",
"group",
",",
"value",
",",
"options",
")",
":",
"errmsg",
"=",
"\"`%s' is not a valid %s option type, try:\\n\"",
"%",
"(",
"value",
",",
"group",
")",
"return",
"errmsg",
"+",
"\" %s\"",
"%",
"\", \"",
".",
"join",
"(",
"options",
")",
"config_options",
"=",
"[",
"\"auto\"",
",",
"\"force\"",
",",
"\"cache\"",
"]",
"opt_config_help",
"=",
"\"Controls Configure subsystem: %s.\"",
"%",
"\", \"",
".",
"join",
"(",
"config_options",
")",
"op",
".",
"add_option",
"(",
"'--config'",
",",
"nargs",
"=",
"1",
",",
"choices",
"=",
"config_options",
",",
"dest",
"=",
"\"config\"",
",",
"default",
"=",
"\"auto\"",
",",
"help",
"=",
"opt_config_help",
",",
"metavar",
"=",
"\"MODE\"",
")",
"op",
".",
"add_option",
"(",
"'-D'",
",",
"dest",
"=",
"\"climb_up\"",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"store_const\"",
",",
"const",
"=",
"2",
",",
"help",
"=",
"\"Search up directory tree for SConstruct, \"",
"\"build all Default() targets.\"",
")",
"deprecated_debug_options",
"=",
"{",
"\"dtree\"",
":",
"'; please use --tree=derived instead'",
",",
"\"nomemoizer\"",
":",
"' and has no effect'",
",",
"\"stree\"",
":",
"'; please use --tree=all,status instead'",
",",
"\"tree\"",
":",
"'; please use --tree=all instead'",
",",
"}",
"debug_options",
"=",
"[",
"\"count\"",
",",
"\"duplicate\"",
",",
"\"explain\"",
",",
"\"findlibs\"",
",",
"\"includes\"",
",",
"\"memoizer\"",
",",
"\"memory\"",
",",
"\"objects\"",
",",
"\"pdb\"",
",",
"\"prepare\"",
",",
"\"presub\"",
",",
"\"stacktrace\"",
",",
"\"time\"",
"]",
"def",
"opt_debug",
"(",
"option",
",",
"opt",
",",
"value__",
",",
"parser",
",",
"debug_options",
"=",
"debug_options",
",",
"deprecated_debug_options",
"=",
"deprecated_debug_options",
")",
":",
"for",
"value",
"in",
"value__",
".",
"split",
"(",
"','",
")",
":",
"if",
"value",
"in",
"debug_options",
":",
"parser",
".",
"values",
".",
"debug",
".",
"append",
"(",
"value",
")",
"elif",
"value",
"in",
"list",
"(",
"deprecated_debug_options",
".",
"keys",
"(",
")",
")",
":",
"parser",
".",
"values",
".",
"debug",
".",
"append",
"(",
"value",
")",
"try",
":",
"parser",
".",
"values",
".",
"delayed_warnings",
"except",
"AttributeError",
":",
"parser",
".",
"values",
".",
"delayed_warnings",
"=",
"[",
"]",
"msg",
"=",
"deprecated_debug_options",
"[",
"value",
"]",
"w",
"=",
"\"The --debug=%s option is deprecated%s.\"",
"%",
"(",
"value",
",",
"msg",
")",
"t",
"=",
"(",
"SCons",
".",
"Warnings",
".",
"DeprecatedDebugOptionsWarning",
",",
"w",
")",
"parser",
".",
"values",
".",
"delayed_warnings",
".",
"append",
"(",
"t",
")",
"else",
":",
"raise",
"OptionValueError",
"(",
"opt_invalid",
"(",
"'debug'",
",",
"value",
",",
"debug_options",
")",
")",
"opt_debug_help",
"=",
"\"Print various types of debugging information: %s.\"",
"%",
"\", \"",
".",
"join",
"(",
"debug_options",
")",
"op",
".",
"add_option",
"(",
"'--debug'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"debug\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_debug",
",",
"help",
"=",
"opt_debug_help",
",",
"metavar",
"=",
"\"TYPE\"",
")",
"def",
"opt_diskcheck",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"try",
":",
"diskcheck_value",
"=",
"diskcheck_convert",
"(",
"value",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"OptionValueError",
"(",
"\"`%s' is not a valid diskcheck type\"",
"%",
"e",
")",
"setattr",
"(",
"parser",
".",
"values",
",",
"option",
".",
"dest",
",",
"diskcheck_value",
")",
"op",
".",
"add_option",
"(",
"'--diskcheck'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"'diskcheck'",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_diskcheck",
",",
"help",
"=",
"\"Enable specific on-disk checks.\"",
",",
"metavar",
"=",
"\"TYPE\"",
")",
"def",
"opt_duplicate",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"if",
"not",
"value",
"in",
"SCons",
".",
"Node",
".",
"FS",
".",
"Valid_Duplicates",
":",
"raise",
"OptionValueError",
"(",
"opt_invalid",
"(",
"'duplication'",
",",
"value",
",",
"SCons",
".",
"Node",
".",
"FS",
".",
"Valid_Duplicates",
")",
")",
"setattr",
"(",
"parser",
".",
"values",
",",
"option",
".",
"dest",
",",
"value",
")",
"# Set the duplicate style right away so it can affect linking",
"# of SConscript files.",
"SCons",
".",
"Node",
".",
"FS",
".",
"set_duplicate",
"(",
"value",
")",
"opt_duplicate_help",
"=",
"\"Set the preferred duplication methods. Must be one of \"",
"+",
"\", \"",
".",
"join",
"(",
"SCons",
".",
"Node",
".",
"FS",
".",
"Valid_Duplicates",
")",
"op",
".",
"add_option",
"(",
"'--duplicate'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"duplicate\"",
",",
"default",
"=",
"'hard-soft-copy'",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_duplicate",
",",
"help",
"=",
"opt_duplicate_help",
")",
"op",
".",
"add_option",
"(",
"'-f'",
",",
"'--file'",
",",
"'--makefile'",
",",
"'--sconstruct'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"file\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"append\"",
",",
"help",
"=",
"\"Read FILE as the top-level SConstruct file.\"",
")",
"op",
".",
"add_option",
"(",
"'-h'",
",",
"'--help'",
",",
"dest",
"=",
"\"help\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Print defined help message, or this one.\"",
")",
"op",
".",
"add_option",
"(",
"\"-H\"",
",",
"\"--help-options\"",
",",
"action",
"=",
"\"help\"",
",",
"help",
"=",
"\"Print this message and exit.\"",
")",
"op",
".",
"add_option",
"(",
"'-i'",
",",
"'--ignore-errors'",
",",
"dest",
"=",
"'ignore_errors'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Ignore errors from build actions.\"",
")",
"op",
".",
"add_option",
"(",
"'-I'",
",",
"'--include-dir'",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"'include_dir'",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"append\"",
",",
"help",
"=",
"\"Search DIR for imported Python modules.\"",
",",
"metavar",
"=",
"\"DIR\"",
")",
"op",
".",
"add_option",
"(",
"'--implicit-cache'",
",",
"dest",
"=",
"'implicit_cache'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Cache implicit dependencies\"",
")",
"def",
"opt_implicit_deps",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"setattr",
"(",
"parser",
".",
"values",
",",
"'implicit_cache'",
",",
"True",
")",
"setattr",
"(",
"parser",
".",
"values",
",",
"option",
".",
"dest",
",",
"True",
")",
"op",
".",
"add_option",
"(",
"'--implicit-deps-changed'",
",",
"dest",
"=",
"\"implicit_deps_changed\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_implicit_deps",
",",
"help",
"=",
"\"Ignore cached implicit dependencies.\"",
")",
"op",
".",
"add_option",
"(",
"'--implicit-deps-unchanged'",
",",
"dest",
"=",
"\"implicit_deps_unchanged\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_implicit_deps",
",",
"help",
"=",
"\"Ignore changes in implicit dependencies.\"",
")",
"op",
".",
"add_option",
"(",
"'--interact'",
",",
"'--interactive'",
",",
"dest",
"=",
"'interactive'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Run in interactive mode.\"",
")",
"op",
".",
"add_option",
"(",
"'-j'",
",",
"'--jobs'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"dest",
"=",
"\"num_jobs\"",
",",
"default",
"=",
"1",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Allow N jobs at once.\"",
",",
"metavar",
"=",
"\"N\"",
")",
"op",
".",
"add_option",
"(",
"'-k'",
",",
"'--keep-going'",
",",
"dest",
"=",
"'keep_going'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Keep going when a target can't be made.\"",
")",
"op",
".",
"add_option",
"(",
"'--max-drift'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"dest",
"=",
"'max_drift'",
",",
"default",
"=",
"SCons",
".",
"Node",
".",
"FS",
".",
"default_max_drift",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Set maximum system clock drift to N seconds.\"",
",",
"metavar",
"=",
"\"N\"",
")",
"op",
".",
"add_option",
"(",
"'--md5-chunksize'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"dest",
"=",
"'md5_chunksize'",
",",
"default",
"=",
"SCons",
".",
"Node",
".",
"FS",
".",
"File",
".",
"md5_chunksize",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Set chunk-size for MD5 signature computation to N kilobytes.\"",
",",
"metavar",
"=",
"\"N\"",
")",
"op",
".",
"add_option",
"(",
"'-n'",
",",
"'--no-exec'",
",",
"'--just-print'",
",",
"'--dry-run'",
",",
"'--recon'",
",",
"dest",
"=",
"'no_exec'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Don't build; just print commands.\"",
")",
"op",
".",
"add_option",
"(",
"'--no-site-dir'",
",",
"dest",
"=",
"'no_site_dir'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Don't search or use the usual site_scons dir.\"",
")",
"op",
".",
"add_option",
"(",
"'--profile'",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"\"profile_file\"",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Profile SCons and put results in FILE.\"",
",",
"metavar",
"=",
"\"FILE\"",
")",
"op",
".",
"add_option",
"(",
"'-q'",
",",
"'--question'",
",",
"dest",
"=",
"\"question\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Don't build; exit status says if up to date.\"",
")",
"op",
".",
"add_option",
"(",
"'-Q'",
",",
"dest",
"=",
"'no_progress'",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Suppress \\\"Reading/Building\\\" progress messages.\"",
")",
"op",
".",
"add_option",
"(",
"'--random'",
",",
"dest",
"=",
"\"random\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Build dependencies in random order.\"",
")",
"op",
".",
"add_option",
"(",
"'-s'",
",",
"'--silent'",
",",
"'--quiet'",
",",
"dest",
"=",
"\"silent\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Don't print commands.\"",
")",
"op",
".",
"add_option",
"(",
"'--site-dir'",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"'site_dir'",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Use DIR instead of the usual site_scons dir.\"",
",",
"metavar",
"=",
"\"DIR\"",
")",
"op",
".",
"add_option",
"(",
"'--stack-size'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"int\"",
",",
"dest",
"=",
"'stack_size'",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Set the stack size of the threads used to run jobs to N kilobytes.\"",
",",
"metavar",
"=",
"\"N\"",
")",
"op",
".",
"add_option",
"(",
"'--taskmastertrace'",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"\"taskmastertrace_file\"",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"Trace Node evaluation to FILE.\"",
",",
"metavar",
"=",
"\"FILE\"",
")",
"tree_options",
"=",
"[",
"\"all\"",
",",
"\"derived\"",
",",
"\"prune\"",
",",
"\"status\"",
"]",
"def",
"opt_tree",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
",",
"tree_options",
"=",
"tree_options",
")",
":",
"from",
".",
"import",
"Main",
"tp",
"=",
"Main",
".",
"TreePrinter",
"(",
")",
"for",
"o",
"in",
"value",
".",
"split",
"(",
"','",
")",
":",
"if",
"o",
"==",
"'all'",
":",
"tp",
".",
"derived",
"=",
"False",
"elif",
"o",
"==",
"'derived'",
":",
"tp",
".",
"derived",
"=",
"True",
"elif",
"o",
"==",
"'prune'",
":",
"tp",
".",
"prune",
"=",
"True",
"elif",
"o",
"==",
"'status'",
":",
"tp",
".",
"status",
"=",
"True",
"else",
":",
"raise",
"OptionValueError",
"(",
"opt_invalid",
"(",
"'--tree'",
",",
"o",
",",
"tree_options",
")",
")",
"parser",
".",
"values",
".",
"tree_printers",
".",
"append",
"(",
"tp",
")",
"opt_tree_help",
"=",
"\"Print a dependency tree in various formats: %s.\"",
"%",
"\", \"",
".",
"join",
"(",
"tree_options",
")",
"op",
".",
"add_option",
"(",
"'--tree'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"tree_printers\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_tree",
",",
"help",
"=",
"opt_tree_help",
",",
"metavar",
"=",
"\"OPTIONS\"",
")",
"op",
".",
"add_option",
"(",
"'-u'",
",",
"'--up'",
",",
"'--search-up'",
",",
"dest",
"=",
"\"climb_up\"",
",",
"default",
"=",
"0",
",",
"action",
"=",
"\"store_const\"",
",",
"const",
"=",
"1",
",",
"help",
"=",
"\"Search up directory tree for SConstruct, \"",
"\"build targets at or below current directory.\"",
")",
"op",
".",
"add_option",
"(",
"'-U'",
",",
"dest",
"=",
"\"climb_up\"",
",",
"default",
"=",
"0",
",",
"action",
"=",
"\"store_const\"",
",",
"const",
"=",
"3",
",",
"help",
"=",
"\"Search up directory tree for SConstruct, \"",
"\"build Default() targets from local SConscript.\"",
")",
"def",
"opt_version",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"parser",
".",
"version",
"+",
"'\\n'",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"op",
".",
"add_option",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_version",
",",
"help",
"=",
"\"Print the SCons version number and exit.\"",
")",
"def",
"opt_warn",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
",",
"tree_options",
"=",
"tree_options",
")",
":",
"if",
"SCons",
".",
"Util",
".",
"is_String",
"(",
"value",
")",
":",
"value",
"=",
"value",
".",
"split",
"(",
"','",
")",
"parser",
".",
"values",
".",
"warn",
".",
"extend",
"(",
"value",
")",
"op",
".",
"add_option",
"(",
"'--warn'",
",",
"'--warning'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"warn\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_warn",
",",
"help",
"=",
"\"Enable or disable warnings.\"",
",",
"metavar",
"=",
"\"WARNING-SPEC\"",
")",
"op",
".",
"add_option",
"(",
"'-Y'",
",",
"'--repository'",
",",
"'--srcdir'",
",",
"nargs",
"=",
"1",
",",
"dest",
"=",
"\"repository\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"append\"",
",",
"help",
"=",
"\"Search REPOSITORY for source and target files.\"",
")",
"# Options from Make and Cons classic that we do not yet support,",
"# but which we may support someday and whose (potential) meanings",
"# we don't want to change. These all get a \"the -X option is not",
"# yet implemented\" message and don't show up in the help output.",
"def",
"opt_not_yet",
"(",
"option",
",",
"opt",
",",
"value",
",",
"parser",
")",
":",
"msg",
"=",
"\"Warning: the %s option is not yet implemented\\n\"",
"%",
"opt",
"sys",
".",
"stderr",
".",
"write",
"(",
"msg",
")",
"op",
".",
"add_option",
"(",
"'-l'",
",",
"'--load-average'",
",",
"'--max-load'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"float\"",
",",
"dest",
"=",
"\"load_average\"",
",",
"default",
"=",
"0",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# action=\"store\",",
"# help=\"Don't start multiple jobs unless load is below \"",
"# \"LOAD-AVERAGE.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'--list-actions'",
",",
"dest",
"=",
"\"list_actions\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# help=\"Don't build; list files and build actions.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'--list-derived'",
",",
"dest",
"=",
"\"list_derived\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# help=\"Don't build; list files that would be built.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'--list-where'",
",",
"dest",
"=",
"\"list_where\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# help=\"Don't build; list files and where defined.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'-o'",
",",
"'--old-file'",
",",
"'--assume-old'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"old_file\"",
",",
"default",
"=",
"[",
"]",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# action=\"append\",",
"# help = \"Consider FILE to be old; don't rebuild it.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'--override'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"dest",
"=",
"\"override\"",
",",
"# help=\"Override variables as specified in FILE.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'-p'",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"dest",
"=",
"\"p\"",
",",
"# help=\"Print internal environments/objects.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'-r'",
",",
"'-R'",
",",
"'--no-builtin-rules'",
",",
"'--no-builtin-variables'",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"dest",
"=",
"\"no_builtin_rules\"",
",",
"# help=\"Clear default environments and variables.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'--write-filenames'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"write_filenames\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# help=\"Write all filenames examined into FILE.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'-W'",
",",
"'--new-file'",
",",
"'--assume-new'",
",",
"'--what-if'",
",",
"nargs",
"=",
"1",
",",
"type",
"=",
"\"string\"",
",",
"dest",
"=",
"\"new_file\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# help=\"Consider FILE to be changed.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"op",
".",
"add_option",
"(",
"'--warn-undefined-variables'",
",",
"dest",
"=",
"\"warn_undefined_variables\"",
",",
"action",
"=",
"\"callback\"",
",",
"callback",
"=",
"opt_not_yet",
",",
"# help=\"Warn when an undefined variable is referenced.\"",
"help",
"=",
"SUPPRESS_HELP",
")",
"return",
"op"
] |
Returns an options parser object initialized with the standard
SCons options.
|
[
"Returns",
"an",
"options",
"parser",
"object",
"initialized",
"with",
"the",
"standard",
"SCons",
"options",
"."
] |
python
|
train
|
JukeboxPipeline/jukebox-core
|
src/jukeboxcore/gui/widgetdelegate.py
|
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgetdelegate.py#L87-L108
|
def paint(self, painter, option, index):
"""Use the painter and style option to render the item specified by the item index.
:param painter: the painter to paint
:type painter: :class:`QtGui.QPainter`
:param option: the options for painting
:type option: :class:`QtGui.QStyleOptionViewItem`
:param index: the index to paint
:type index: :class:`QtCore.QModelIndex`
:returns: None
:rtype: None
:raises: None
"""
if self._widget is None:
return super(WidgetDelegate, self).paint(painter, option, index)
self.set_widget_index(index)
painter.save()
painter.translate(option.rect.topLeft())
self._widget.resize(option.rect.size())
self._widget.render(painter, QtCore.QPoint())
painter.restore()
|
[
"def",
"paint",
"(",
"self",
",",
"painter",
",",
"option",
",",
"index",
")",
":",
"if",
"self",
".",
"_widget",
"is",
"None",
":",
"return",
"super",
"(",
"WidgetDelegate",
",",
"self",
")",
".",
"paint",
"(",
"painter",
",",
"option",
",",
"index",
")",
"self",
".",
"set_widget_index",
"(",
"index",
")",
"painter",
".",
"save",
"(",
")",
"painter",
".",
"translate",
"(",
"option",
".",
"rect",
".",
"topLeft",
"(",
")",
")",
"self",
".",
"_widget",
".",
"resize",
"(",
"option",
".",
"rect",
".",
"size",
"(",
")",
")",
"self",
".",
"_widget",
".",
"render",
"(",
"painter",
",",
"QtCore",
".",
"QPoint",
"(",
")",
")",
"painter",
".",
"restore",
"(",
")"
] |
Use the painter and style option to render the item specified by the item index.
:param painter: the painter to paint
:type painter: :class:`QtGui.QPainter`
:param option: the options for painting
:type option: :class:`QtGui.QStyleOptionViewItem`
:param index: the index to paint
:type index: :class:`QtCore.QModelIndex`
:returns: None
:rtype: None
:raises: None
|
[
"Use",
"the",
"painter",
"and",
"style",
"option",
"to",
"render",
"the",
"item",
"specified",
"by",
"the",
"item",
"index",
"."
] |
python
|
train
|
onicagroup/runway
|
runway/commands/runway/gen_sample.py
|
https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/commands/runway/gen_sample.py#L18-L25
|
def generate_sample_module(module_dir):
"""Generate skeleton sample module."""
if os.path.isdir(module_dir):
LOGGER.error("Error generating sample module -- directory %s "
"already exists!",
module_dir)
sys.exit(1)
os.mkdir(module_dir)
|
[
"def",
"generate_sample_module",
"(",
"module_dir",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"module_dir",
")",
":",
"LOGGER",
".",
"error",
"(",
"\"Error generating sample module -- directory %s \"",
"\"already exists!\"",
",",
"module_dir",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"os",
".",
"mkdir",
"(",
"module_dir",
")"
] |
Generate skeleton sample module.
|
[
"Generate",
"skeleton",
"sample",
"module",
"."
] |
python
|
train
|
theolind/pymysensors
|
mysensors/handler.py
|
https://github.com/theolind/pymysensors/blob/a139ab6e2f6b71ebaf37282f69bfd0f7fe6193b6/mysensors/handler.py#L218-L225
|
def handle_heartbeat_response(msg):
"""Process an internal heartbeat response message."""
if not msg.gateway.is_sensor(msg.node_id):
return None
handle_smartsleep(msg)
msg.gateway.sensors[msg.node_id].heartbeat = msg.payload
msg.gateway.alert(msg)
return None
|
[
"def",
"handle_heartbeat_response",
"(",
"msg",
")",
":",
"if",
"not",
"msg",
".",
"gateway",
".",
"is_sensor",
"(",
"msg",
".",
"node_id",
")",
":",
"return",
"None",
"handle_smartsleep",
"(",
"msg",
")",
"msg",
".",
"gateway",
".",
"sensors",
"[",
"msg",
".",
"node_id",
"]",
".",
"heartbeat",
"=",
"msg",
".",
"payload",
"msg",
".",
"gateway",
".",
"alert",
"(",
"msg",
")",
"return",
"None"
] |
Process an internal heartbeat response message.
|
[
"Process",
"an",
"internal",
"heartbeat",
"response",
"message",
"."
] |
python
|
train
|
billy-yoyo/RainbowSixSiege-Python-API
|
r6sapi/r6sapi.py
|
https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1093-L1103
|
def load_level(self):
"""|coro|
Load the players XP and level"""
data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s" % (self.spaceid, self.platform_url, self.id))
if "player_profiles" in data and len(data["player_profiles"]) > 0:
self.xp = data["player_profiles"][0].get("xp", 0)
self.level = data["player_profiles"][0].get("level", 0)
else:
raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
|
[
"def",
"load_level",
"(",
"self",
")",
":",
"data",
"=",
"yield",
"from",
"self",
".",
"auth",
".",
"get",
"(",
"\"https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6playerprofile/playerprofile/progressions?profile_ids=%s\"",
"%",
"(",
"self",
".",
"spaceid",
",",
"self",
".",
"platform_url",
",",
"self",
".",
"id",
")",
")",
"if",
"\"player_profiles\"",
"in",
"data",
"and",
"len",
"(",
"data",
"[",
"\"player_profiles\"",
"]",
")",
">",
"0",
":",
"self",
".",
"xp",
"=",
"data",
"[",
"\"player_profiles\"",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"\"xp\"",
",",
"0",
")",
"self",
".",
"level",
"=",
"data",
"[",
"\"player_profiles\"",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"\"level\"",
",",
"0",
")",
"else",
":",
"raise",
"InvalidRequest",
"(",
"\"Missing key player_profiles in returned JSON object %s\"",
"%",
"str",
"(",
"data",
")",
")"
] |
|coro|
Load the players XP and level
|
[
"|coro|"
] |
python
|
train
|
davidblaisonneau-orange/foreman
|
foreman/api.py
|
https://github.com/davidblaisonneau-orange/foreman/blob/acb8fd8d74657cfac3b25c82e9c6028b93eb6c92/foreman/api.py#L208-L221
|
def delete(self, obj, id):
""" Function delete
Delete an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the server response
"""
self.url = '{}{}/{}'.format(self.base_url, obj, id)
self.method = 'DELETE'
self.resp = requests.delete(url=self.url,
auth=self.auth,
headers=self.headers, cert=self.ca_cert)
return self.__process_resp__(obj)
|
[
"def",
"delete",
"(",
"self",
",",
"obj",
",",
"id",
")",
":",
"self",
".",
"url",
"=",
"'{}{}/{}'",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"obj",
",",
"id",
")",
"self",
".",
"method",
"=",
"'DELETE'",
"self",
".",
"resp",
"=",
"requests",
".",
"delete",
"(",
"url",
"=",
"self",
".",
"url",
",",
"auth",
"=",
"self",
".",
"auth",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"cert",
"=",
"self",
".",
"ca_cert",
")",
"return",
"self",
".",
"__process_resp__",
"(",
"obj",
")"
] |
Function delete
Delete an object by id
@param obj: object name ('hosts', 'puppetclasses'...)
@param id: the id of the object (name or id)
@return RETURN: the server response
|
[
"Function",
"delete",
"Delete",
"an",
"object",
"by",
"id"
] |
python
|
train
|
curious-containers/cc-core
|
cc_core/commons/input_references.py
|
https://github.com/curious-containers/cc-core/blob/eaeb03a4366016aff54fcc6953d052ae12ed599b/cc_core/commons/input_references.py#L242-L267
|
def resolve_input_references(to_resolve, inputs_to_reference):
    """
    Resolves input references given in the string to_resolve by using the inputs_to_reference.
    See http://www.commonwl.org/user_guide/06-params/index.html for more information.
    Example:
    "$(inputs.my_file.nameroot).md" -> "filename.md"
    :param to_resolve: The path to match
    :param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
    :return: A string in which the input references are replaced with actual values.
    """
    resolved_parts = []
    for fragment in split_input_references(to_resolve):
        # Literal fragments pass through unchanged; reference fragments are
        # replaced by their resolved (stringified) value.
        if is_input_reference(fragment):
            fragment = str(resolve_input_reference(fragment, inputs_to_reference))
        resolved_parts.append(fragment)
    return ''.join(resolved_parts)
|
[
"def",
"resolve_input_references",
"(",
"to_resolve",
",",
"inputs_to_reference",
")",
":",
"splitted",
"=",
"split_input_references",
"(",
"to_resolve",
")",
"result",
"=",
"[",
"]",
"for",
"part",
"in",
"splitted",
":",
"if",
"is_input_reference",
"(",
"part",
")",
":",
"result",
".",
"append",
"(",
"str",
"(",
"resolve_input_reference",
"(",
"part",
",",
"inputs_to_reference",
")",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"part",
")",
"return",
"''",
".",
"join",
"(",
"result",
")"
] |
Resolves input references given in the string to_resolve by using the inputs_to_reference.
See http://www.commonwl.org/user_guide/06-params/index.html for more information.
Example:
"$(inputs.my_file.nameroot).md" -> "filename.md"
:param to_resolve: The path to match
:param inputs_to_reference: Inputs which are used to resolve input references like $(inputs.my_input_file.basename).
:return: A string in which the input references are replaced with actual values.
|
[
"Resolves",
"input",
"references",
"given",
"in",
"the",
"string",
"to_resolve",
"by",
"using",
"the",
"inputs_to_reference",
"."
] |
python
|
train
|
IdentityPython/SATOSA
|
src/satosa/backends/openid_connect.py
|
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/backends/openid_connect.py#L238-L272
|
def _create_client(provider_metadata, client_metadata, verify_ssl=True):
    """
    Create a pyoidc client instance.
    :param provider_metadata: provider configuration information
    :type provider_metadata: Mapping[str, Union[str, Sequence[str]]]
    :param client_metadata: client metadata
    :type client_metadata: Mapping[str, Union[str, Sequence[str]]]
    :return: client instance to use for communicating with the configured provider
    :rtype: oic.oic.Client
    """
    client = oic.Client(client_authn_method=CLIENT_AUTHN_METHOD,
                        verify_ssl=verify_ssl)

    # Provider configuration: use the static metadata when an authorization
    # endpoint is already known, otherwise discover it from the issuer.
    if "authorization_endpoint" not in provider_metadata:
        client.provider_config(provider_metadata["issuer"])
    else:
        client.handle_provider_config(
            ProviderConfigurationResponse(**provider_metadata),
            provider_metadata["issuer"])

    # Client information: use the static registration when a client_id is
    # provided, otherwise register dynamically with the provider.
    if "client_id" not in client_metadata:
        client.register(client.provider_info['registration_endpoint'],
                        **client_metadata)
    else:
        client.store_registration_info(RegistrationRequest(**client_metadata))

    # Fall back to the provider's first supported subject type when the
    # registration response does not specify one.
    subject_type = client.registration_response.get("subject_type")
    if not subject_type:
        subject_type = client.provider_info["subject_types_supported"][0]
    client.subject_type = subject_type
    return client
|
[
"def",
"_create_client",
"(",
"provider_metadata",
",",
"client_metadata",
",",
"verify_ssl",
"=",
"True",
")",
":",
"client",
"=",
"oic",
".",
"Client",
"(",
"client_authn_method",
"=",
"CLIENT_AUTHN_METHOD",
",",
"verify_ssl",
"=",
"verify_ssl",
")",
"# Provider configuration information",
"if",
"\"authorization_endpoint\"",
"in",
"provider_metadata",
":",
"# no dynamic discovery necessary",
"client",
".",
"handle_provider_config",
"(",
"ProviderConfigurationResponse",
"(",
"*",
"*",
"provider_metadata",
")",
",",
"provider_metadata",
"[",
"\"issuer\"",
"]",
")",
"else",
":",
"# do dynamic discovery",
"client",
".",
"provider_config",
"(",
"provider_metadata",
"[",
"\"issuer\"",
"]",
")",
"# Client information",
"if",
"\"client_id\"",
"in",
"client_metadata",
":",
"# static client info provided",
"client",
".",
"store_registration_info",
"(",
"RegistrationRequest",
"(",
"*",
"*",
"client_metadata",
")",
")",
"else",
":",
"# do dynamic registration",
"client",
".",
"register",
"(",
"client",
".",
"provider_info",
"[",
"'registration_endpoint'",
"]",
",",
"*",
"*",
"client_metadata",
")",
"client",
".",
"subject_type",
"=",
"(",
"client",
".",
"registration_response",
".",
"get",
"(",
"\"subject_type\"",
")",
"or",
"client",
".",
"provider_info",
"[",
"\"subject_types_supported\"",
"]",
"[",
"0",
"]",
")",
"return",
"client"
] |
Create a pyoidc client instance.
:param provider_metadata: provider configuration information
:type provider_metadata: Mapping[str, Union[str, Sequence[str]]]
:param client_metadata: client metadata
:type client_metadata: Mapping[str, Union[str, Sequence[str]]]
:return: client instance to use for communicating with the configured provider
:rtype: oic.oic.Client
|
[
"Create",
"a",
"pyoidc",
"client",
"instance",
".",
":",
"param",
"provider_metadata",
":",
"provider",
"configuration",
"information",
":",
"type",
"provider_metadata",
":",
"Mapping",
"[",
"str",
"Union",
"[",
"str",
"Sequence",
"[",
"str",
"]]]",
":",
"param",
"client_metadata",
":",
"client",
"metadata",
":",
"type",
"client_metadata",
":",
"Mapping",
"[",
"str",
"Union",
"[",
"str",
"Sequence",
"[",
"str",
"]]]",
":",
"return",
":",
"client",
"instance",
"to",
"use",
"for",
"communicating",
"with",
"the",
"configured",
"provider",
":",
"rtype",
":",
"oic",
".",
"oic",
".",
"Client"
] |
python
|
train
|
has2k1/mizani
|
mizani/utils.py
|
https://github.com/has2k1/mizani/blob/312d0550ee0136fd1b0384829b33f3b2065f47c8/mizani/utils.py#L194-L224
|
def multitype_sort(a):
    """
    Sort elements of multiple types
    x is assumed to contain elements of different types, such that
    plain sort would raise a `TypeError`.
    Parameters
    ----------
    a : array-like
        Array of items to be sorted
    Returns
    -------
    out : list
        Items sorted within their type groups.
    """
    numeric_types = {int, float, complex}
    groups = defaultdict(list)

    # Bucket items by type; all plain numbers share one bucket so they
    # sort together. Bucket order follows first appearance in `a`.
    for item in a:
        bucket = 'number' if type(item) in numeric_types else type(item)
        groups[bucket].append(item)

    sorted_buckets = (np.sort(groups[bucket]) for bucket in groups)
    return list(chain(*sorted_buckets))
|
[
"def",
"multitype_sort",
"(",
"a",
")",
":",
"types",
"=",
"defaultdict",
"(",
"list",
")",
"numbers",
"=",
"{",
"int",
",",
"float",
",",
"complex",
"}",
"for",
"x",
"in",
"a",
":",
"t",
"=",
"type",
"(",
"x",
")",
"if",
"t",
"in",
"numbers",
":",
"types",
"[",
"'number'",
"]",
".",
"append",
"(",
"x",
")",
"else",
":",
"types",
"[",
"t",
"]",
".",
"append",
"(",
"x",
")",
"for",
"t",
"in",
"types",
":",
"types",
"[",
"t",
"]",
"=",
"np",
".",
"sort",
"(",
"types",
"[",
"t",
"]",
")",
"return",
"list",
"(",
"chain",
"(",
"*",
"(",
"types",
"[",
"t",
"]",
"for",
"t",
"in",
"types",
")",
")",
")"
] |
Sort elements of multiple types
x is assumed to contain elements of different types, such that
plain sort would raise a `TypeError`.
Parameters
----------
a : array-like
Array of items to be sorted
Returns
-------
out : list
Items sorted within their type groups.
|
[
"Sort",
"elements",
"of",
"multiple",
"types"
] |
python
|
valid
|
google/grr
|
grr/server/grr_response_server/gui/api_plugins/config.py
|
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/config.py#L140-L146
|
def Handle(self, args, token=None):
    """Renders specified config option."""
    name = args.name
    if not name:
        raise ValueError("Name not specified.")
    return ApiConfigOption().InitFromConfigOption(name)
|
[
"def",
"Handle",
"(",
"self",
",",
"args",
",",
"token",
"=",
"None",
")",
":",
"if",
"not",
"args",
".",
"name",
":",
"raise",
"ValueError",
"(",
"\"Name not specified.\"",
")",
"return",
"ApiConfigOption",
"(",
")",
".",
"InitFromConfigOption",
"(",
"args",
".",
"name",
")"
] |
Renders specified config option.
|
[
"Renders",
"specified",
"config",
"option",
"."
] |
python
|
train
|
allenai/allennlp
|
allennlp/semparse/contexts/sql_context_utils.py
|
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/sql_context_utils.py#L16-L23
|
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
    """
    Formats a dictionary of production rules into the string format expected
    by the Parsimonious Grammar class.
    """
    rules = []
    for nonterminal, right_hand_side in grammar_dictionary.items():
        # Each rule line is "<lhs> = <alt1> / <alt2> / ...".
        rules.append(f"{nonterminal} = {' / '.join(right_hand_side)}")
    # Backslashes must be escaped for the Parsimonious grammar parser.
    return '\n'.join(rules).replace("\\", "\\\\")
|
[
"def",
"format_grammar_string",
"(",
"grammar_dictionary",
":",
"Dict",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
")",
"->",
"str",
":",
"grammar_string",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"f\"{nonterminal} = {' / '.join(right_hand_side)}\"",
"for",
"nonterminal",
",",
"right_hand_side",
"in",
"grammar_dictionary",
".",
"items",
"(",
")",
"]",
")",
"return",
"grammar_string",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")"
] |
Formats a dictionary of production rules into the string format expected
by the Parsimonious Grammar class.
|
[
"Formats",
"a",
"dictionary",
"of",
"production",
"rules",
"into",
"the",
"string",
"format",
"expected",
"by",
"the",
"Parsimonious",
"Grammar",
"class",
"."
] |
python
|
train
|
mushkevych/scheduler
|
synergy/system/time_helper.py
|
https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/system/time_helper.py#L98-L135
|
def increment_timeperiod(time_qualifier, timeperiod, delta=1):
    """ method performs simple increment/decrement of the timeperiods
    For instance: 2010010119 with delta=1 -> 2010010120
    Or 2010010000 with delta=-1 -> 2009120000, etc

    :param time_qualifier: one of QUALIFIER_HOURLY/DAILY/MONTHLY/YEARLY,
        selecting which unit `delta` is applied to
    :param timeperiod: timeperiod string; its pattern is inferred via
        define_pattern()
    :param delta: signed number of units to add (default 1)
    :raises ValueError: if time_qualifier is not one of the known qualifiers
    """
    pattern = define_pattern(timeperiod)
    t = datetime.strptime(timeperiod, pattern)
    if time_qualifier == QUALIFIER_HOURLY:
        t = t + timedelta(hours=delta)
        return t.strftime(SYNERGY_HOURLY_PATTERN)
    elif time_qualifier == QUALIFIER_DAILY:
        t = t + timedelta(days=delta)
        return t.strftime(SYNERGY_DAILY_PATTERN)
    elif time_qualifier == QUALIFIER_MONTHLY:
        # timedelta has no "months" unit, so month arithmetic is done by
        # hand: split delta into whole years plus a remainder of months.
        yearly_increment = abs(delta) // 12
        yearly_increment = yearly_increment if delta >= 0 else -yearly_increment
        monthly_increment = delta - yearly_increment * 12
        # Normalize month overflow/underflow into an extra +/-1 year.
        if t.month + monthly_increment > 12:
            new_month = t.month + monthly_increment - 12
            new_year = t.year + yearly_increment + 1
            t = t.replace(year=new_year, month=new_month)
        elif t.month + monthly_increment < 1:
            new_month = t.month + monthly_increment + 12
            new_year = t.year + yearly_increment - 1
            t = t.replace(year=new_year, month=new_month)
        else:
            t = t.replace(year=t.year + yearly_increment, month=t.month + monthly_increment)
        return t.strftime(SYNERGY_MONTHLY_PATTERN)
    elif time_qualifier == QUALIFIER_YEARLY:
        t = t.replace(year=t.year + delta)
        return t.strftime(SYNERGY_YEARLY_PATTERN)
    else:
        raise ValueError('unknown time qualifier: {0}'.format(time_qualifier))
|
[
"def",
"increment_timeperiod",
"(",
"time_qualifier",
",",
"timeperiod",
",",
"delta",
"=",
"1",
")",
":",
"pattern",
"=",
"define_pattern",
"(",
"timeperiod",
")",
"t",
"=",
"datetime",
".",
"strptime",
"(",
"timeperiod",
",",
"pattern",
")",
"if",
"time_qualifier",
"==",
"QUALIFIER_HOURLY",
":",
"t",
"=",
"t",
"+",
"timedelta",
"(",
"hours",
"=",
"delta",
")",
"return",
"t",
".",
"strftime",
"(",
"SYNERGY_HOURLY_PATTERN",
")",
"elif",
"time_qualifier",
"==",
"QUALIFIER_DAILY",
":",
"t",
"=",
"t",
"+",
"timedelta",
"(",
"days",
"=",
"delta",
")",
"return",
"t",
".",
"strftime",
"(",
"SYNERGY_DAILY_PATTERN",
")",
"elif",
"time_qualifier",
"==",
"QUALIFIER_MONTHLY",
":",
"yearly_increment",
"=",
"abs",
"(",
"delta",
")",
"//",
"12",
"yearly_increment",
"=",
"yearly_increment",
"if",
"delta",
">=",
"0",
"else",
"-",
"yearly_increment",
"monthly_increment",
"=",
"delta",
"-",
"yearly_increment",
"*",
"12",
"if",
"t",
".",
"month",
"+",
"monthly_increment",
">",
"12",
":",
"new_month",
"=",
"t",
".",
"month",
"+",
"monthly_increment",
"-",
"12",
"new_year",
"=",
"t",
".",
"year",
"+",
"yearly_increment",
"+",
"1",
"t",
"=",
"t",
".",
"replace",
"(",
"year",
"=",
"new_year",
",",
"month",
"=",
"new_month",
")",
"elif",
"t",
".",
"month",
"+",
"monthly_increment",
"<",
"1",
":",
"new_month",
"=",
"t",
".",
"month",
"+",
"monthly_increment",
"+",
"12",
"new_year",
"=",
"t",
".",
"year",
"+",
"yearly_increment",
"-",
"1",
"t",
"=",
"t",
".",
"replace",
"(",
"year",
"=",
"new_year",
",",
"month",
"=",
"new_month",
")",
"else",
":",
"t",
"=",
"t",
".",
"replace",
"(",
"year",
"=",
"t",
".",
"year",
"+",
"yearly_increment",
",",
"month",
"=",
"t",
".",
"month",
"+",
"monthly_increment",
")",
"return",
"t",
".",
"strftime",
"(",
"SYNERGY_MONTHLY_PATTERN",
")",
"elif",
"time_qualifier",
"==",
"QUALIFIER_YEARLY",
":",
"t",
"=",
"t",
".",
"replace",
"(",
"year",
"=",
"t",
".",
"year",
"+",
"delta",
")",
"return",
"t",
".",
"strftime",
"(",
"SYNERGY_YEARLY_PATTERN",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'unknown time qualifier: {0}'",
".",
"format",
"(",
"time_qualifier",
")",
")"
] |
method performs simple increment/decrement of the timeperiods
For instance: 2010010119 with delta=1 -> 2010010120
Or 2010010000 with delta=-1 -> 2009120000, etc
|
[
"method",
"performs",
"simple",
"increment",
"/",
"decrement",
"of",
"the",
"timeperiods",
"For",
"instance",
":",
"2010010119",
"with",
"delta",
"=",
"1",
"-",
">",
"2010010120",
"Or",
"2010010000",
"with",
"delta",
"=",
"-",
"1",
"-",
">",
"2009120000",
"etc"
] |
python
|
train
|
serhatbolsu/robotframework-appiumlibrary
|
AppiumLibrary/keywords/_waiting.py
|
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_waiting.py#L48-L70
|
def wait_until_page_does_not_contain(self, text, timeout=None, error=None):
    """Waits until `text` disappears from current page.
    Fails if `timeout` expires before the `text` disappears. See
    `introduction` for more information about `timeout` and its
    default value.
    `error` can be used to override the default error message.
    See also `Wait Until Page Contains`,
    `Wait Until Page Contains Element`,
    `Wait Until Page Does Not Contain Element` and
    BuiltIn keyword `Wait Until Keyword Succeeds`.
    """
    def _still_present():
        # Returning None signals success; returning a message keeps waiting.
        if not self._is_text_present(text):
            return None
        return error or "Text '%s' did not disappear in %s" % (
            text, self._format_timeout(timeout))

    self._wait_until_no_error(timeout, _still_present)
|
[
"def",
"wait_until_page_does_not_contain",
"(",
"self",
",",
"text",
",",
"timeout",
"=",
"None",
",",
"error",
"=",
"None",
")",
":",
"def",
"check_present",
"(",
")",
":",
"present",
"=",
"self",
".",
"_is_text_present",
"(",
"text",
")",
"if",
"not",
"present",
":",
"return",
"else",
":",
"return",
"error",
"or",
"\"Text '%s' did not disappear in %s\"",
"%",
"(",
"text",
",",
"self",
".",
"_format_timeout",
"(",
"timeout",
")",
")",
"self",
".",
"_wait_until_no_error",
"(",
"timeout",
",",
"check_present",
")"
] |
Waits until `text` disappears from current page.
Fails if `timeout` expires before the `text` disappears. See
`introduction` for more information about `timeout` and its
default value.
`error` can be used to override the default error message.
See also `Wait Until Page Contains`,
`Wait Until Page Contains Element`,
`Wait Until Page Does Not Contain Element` and
BuiltIn keyword `Wait Until Keyword Succeeds`.
|
[
"Waits",
"until",
"text",
"disappears",
"from",
"current",
"page",
"."
] |
python
|
train
|
SergeySatskiy/cdm-pythonparser
|
cdmpyparser.py
|
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L500-L510
|
def _onClass(self, name, line, pos, absPosition,
             keywordLine, keywordPos,
             colonLine, colonPos, level):
    """Memorizes a class, attaching any decorators seen just before it."""
    self.__flushLevel(level)
    klass = Class(name, line, pos, absPosition, keywordLine, keywordPos,
                  colonLine, colonPos)
    pending = self.__lastDecorators
    if pending is not None:
        klass.decorators = pending
        self.__lastDecorators = None
    self.objectsStack.append(klass)
|
[
"def",
"_onClass",
"(",
"self",
",",
"name",
",",
"line",
",",
"pos",
",",
"absPosition",
",",
"keywordLine",
",",
"keywordPos",
",",
"colonLine",
",",
"colonPos",
",",
"level",
")",
":",
"self",
".",
"__flushLevel",
"(",
"level",
")",
"c",
"=",
"Class",
"(",
"name",
",",
"line",
",",
"pos",
",",
"absPosition",
",",
"keywordLine",
",",
"keywordPos",
",",
"colonLine",
",",
"colonPos",
")",
"if",
"self",
".",
"__lastDecorators",
"is",
"not",
"None",
":",
"c",
".",
"decorators",
"=",
"self",
".",
"__lastDecorators",
"self",
".",
"__lastDecorators",
"=",
"None",
"self",
".",
"objectsStack",
".",
"append",
"(",
"c",
")"
] |
Memorizes a class
|
[
"Memorizes",
"a",
"class"
] |
python
|
train
|
pantsbuild/pants
|
src/python/pants/subsystem/subsystem.py
|
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/subsystem/subsystem.py#L98-L111
|
def scoped_instance(cls, optionable):
    """Returns an instance of this subsystem for exclusive use by the given `optionable`.
    :API: public
    :param optionable: An optionable type or instance to scope this subsystem under.
    :type: :class:`pants.option.optionable.Optionable`
    :returns: The scoped subsystem instance.
    :rtype: :class:`pants.subsystem.subsystem.Subsystem`
    """
    # Accept either an Optionable instance or an Optionable subclass; the
    # isinstance check short-circuits so issubclass never sees an instance.
    if not (isinstance(optionable, Optionable) or issubclass(optionable, Optionable)):
      raise TypeError('Can only scope an instance against an Optionable, given {} of type {}.'
                      .format(optionable, type(optionable)))
    return cls._instance_for_scope(cls.subscope(optionable.options_scope))
|
[
"def",
"scoped_instance",
"(",
"cls",
",",
"optionable",
")",
":",
"if",
"not",
"isinstance",
"(",
"optionable",
",",
"Optionable",
")",
"and",
"not",
"issubclass",
"(",
"optionable",
",",
"Optionable",
")",
":",
"raise",
"TypeError",
"(",
"'Can only scope an instance against an Optionable, given {} of type {}.'",
".",
"format",
"(",
"optionable",
",",
"type",
"(",
"optionable",
")",
")",
")",
"return",
"cls",
".",
"_instance_for_scope",
"(",
"cls",
".",
"subscope",
"(",
"optionable",
".",
"options_scope",
")",
")"
] |
Returns an instance of this subsystem for exclusive use by the given `optionable`.
:API: public
:param optionable: An optionable type or instance to scope this subsystem under.
:type: :class:`pants.option.optionable.Optionable`
:returns: The scoped subsystem instance.
:rtype: :class:`pants.subsystem.subsystem.Subsystem`
|
[
"Returns",
"an",
"instance",
"of",
"this",
"subsystem",
"for",
"exclusive",
"use",
"by",
"the",
"given",
"optionable",
"."
] |
python
|
train
|
decryptus/sonicprobe
|
sonicprobe/libs/daemonize.py
|
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/daemonize.py#L107-L139
|
def take_file_lock(own_file, lock_file, own_content):
    """
    Atomically "move" @own_file to @lock_file if the latter does not exists,
    else just remove @own_file.
    @own_file: filepath of the temporary file that contains our PID
    @lock_file: destination filepath
    @own_content: content of @own_file
    Return True if the lock has been successfully taken, else False.
    (Caller should also be prepared for OSError exceptions)
    """
    try:
        try:
            # os.link fails atomically with EEXIST if lock_file already
            # exists, which is what makes this usable as a lock.
            os.link(own_file, lock_file)
        finally:
            os.unlink(own_file)
    # "except OSError, e" is Python-2-only syntax; "as" works on 2.6+ and 3.
    except OSError as e:
        if e.errno == errno.EEXIST:
            log.warning("The lock file %r already exists - won't "
                        "overwrite it. An other instance of ourself "
                        "is probably running.", lock_file)
            return False
        else:
            raise
    # Read one byte more than expected so a longer file also fails the
    # comparison. The deprecated file() builtin leaked the handle; use a
    # context manager instead.
    with open(lock_file) as lock_fp:
        content = lock_fp.read(len(own_content) + 1)
    if content != own_content:
        log.warning(
            "I thought I successfully took the lock file %r but "
            "it does not contain what was expected. Somebody is "
            "playing with us.", lock_file)
        return False
    return True
|
[
"def",
"take_file_lock",
"(",
"own_file",
",",
"lock_file",
",",
"own_content",
")",
":",
"try",
":",
"try",
":",
"os",
".",
"link",
"(",
"own_file",
",",
"lock_file",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"own_file",
")",
"except",
"OSError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":",
"log",
".",
"warning",
"(",
"\"The lock file %r already exists - won't \"",
"\"overwrite it. An other instance of ourself \"",
"\"is probably running.\"",
",",
"lock_file",
")",
"return",
"False",
"else",
":",
"raise",
"content",
"=",
"file",
"(",
"lock_file",
")",
".",
"read",
"(",
"len",
"(",
"own_content",
")",
"+",
"1",
")",
"if",
"content",
"!=",
"own_content",
":",
"log",
".",
"warning",
"(",
"\"I thought I successfully took the lock file %r but \"",
"\"it does not contain what was expected. Somebody is \"",
"\"playing with us.\"",
",",
"lock_file",
")",
"return",
"False",
"return",
"True"
] |
Atomically "move" @own_file to @lock_file if the latter does not exists,
else just remove @own_file.
@own_file: filepath of the temporary file that contains our PID
@lock_file: destination filepath
@own_content: content of @own_file
Return True if the lock has been successfully taken, else False.
(Caller should also be prepared for OSError exceptions)
|
[
"Atomically",
"move",
"@own_file",
"to",
"@lock_file",
"if",
"the",
"latter",
"does",
"not",
"exists",
"else",
"just",
"remove",
"@own_file",
"."
] |
python
|
train
|
phaethon/kamene
|
kamene/contrib/gtp.py
|
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gtp.py#L399-L413
|
def IE_Dispatcher(s):
    """Choose the correct Information Element class."""
    if not s:
        return Raw(s)
    # The first octet carries the IE type.
    ietype = ord(s[0])
    cls = ietypecls.get(ietype, Raw)
    # IE types with the high bit set (>= 128) are TLV-encoded; fall back to
    # the generic not-implemented TLV class when no specific one is known.
    if cls == Raw and ietype & 128 == 128:
        cls = IE_NotImplementedTLV
    return cls(s)
|
[
"def",
"IE_Dispatcher",
"(",
"s",
")",
":",
"if",
"len",
"(",
"s",
")",
"<",
"1",
":",
"return",
"Raw",
"(",
"s",
")",
"# Get the IE type",
"ietype",
"=",
"ord",
"(",
"s",
"[",
"0",
"]",
")",
"cls",
"=",
"ietypecls",
".",
"get",
"(",
"ietype",
",",
"Raw",
")",
"# if ietype greater than 128 are TLVs",
"if",
"cls",
"==",
"Raw",
"and",
"ietype",
"&",
"128",
"==",
"128",
":",
"cls",
"=",
"IE_NotImplementedTLV",
"return",
"cls",
"(",
"s",
")"
] |
Choose the correct Information Element class.
|
[
"Choose",
"the",
"correct",
"Information",
"Element",
"class",
"."
] |
python
|
train
|
woolfson-group/isambard
|
isambard/ampal/pseudo_atoms.py
|
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/pseudo_atoms.py#L275-L288
|
def rise_per_residue(self):
    """The rise per residue at each point on the Primitive.
    Notes
    -----
    Each element of the returned list is the rise per residue,
    at a point on the Primitive. Element i is the distance
    between primitive[i] and primitive[i + 1]. The final value
    is None.
    """
    rprs = []
    for idx in range(len(self) - 1):
        rprs.append(distance(self[idx]['CA'], self[idx + 1]['CA']))
    # The last point has no successor, so its rise is undefined.
    rprs.append(None)
    return rprs
|
[
"def",
"rise_per_residue",
"(",
"self",
")",
":",
"rprs",
"=",
"[",
"distance",
"(",
"self",
"[",
"i",
"]",
"[",
"'CA'",
"]",
",",
"self",
"[",
"i",
"+",
"1",
"]",
"[",
"'CA'",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
")",
"-",
"1",
")",
"]",
"rprs",
".",
"append",
"(",
"None",
")",
"return",
"rprs"
] |
The rise per residue at each point on the Primitive.
Notes
-----
Each element of the returned list is the rise per residue,
at a point on the Primitive. Element i is the distance
between primitive[i] and primitive[i + 1]. The final value
is None.
|
[
"The",
"rise",
"per",
"residue",
"at",
"each",
"point",
"on",
"the",
"Primitive",
"."
] |
python
|
train
|
log2timeline/plaso
|
plaso/output/mediator.py
|
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/mediator.py#L148-L217
|
def GetMACBRepresentation(self, event):
    """Retrieves the MACB representation.

    The four characters stand for M (content modification), A (access),
    C (metadata change) and B (birth/creation); '.' marks a timestamp
    class the event does not represent.
    Args:
      event (EventObject): event.
    Returns:
      str: MACB representation, e.g. '.A..' or 'MACB' for fs:stat events.
    """
    data_type = getattr(event, 'data_type', None)
    if not data_type:
      return '....'
    # The filestat parser is somewhat limited.
    if data_type == 'fs:stat':
      # fs:stat events can carry several semicolon-separated descriptions,
      # each of which sets one position of the MACB string.
      descriptions = event.timestamp_desc.split(';')
      return_characters = ['.', '.', '.', '.']
      for description in descriptions:
        if description in (
            'mtime', definitions.TIME_DESCRIPTION_MODIFICATION):
          return_characters[0] = 'M'
        elif description in (
            'atime', definitions.TIME_DESCRIPTION_LAST_ACCESS):
          return_characters[1] = 'A'
        elif description in (
            'ctime', definitions.TIME_DESCRIPTION_CHANGE):
          return_characters[2] = 'C'
        elif description in (
            'crtime', definitions.TIME_DESCRIPTION_CREATION):
          return_characters[3] = 'B'
      return ''.join(return_characters)
    # For all other data types the single timestamp description is mapped
    # to exactly one MACB class; the checks below are mutually exclusive.
    # Access time.
    if event.timestamp_desc in [
        definitions.TIME_DESCRIPTION_LAST_ACCESS,
        definitions.TIME_DESCRIPTION_ACCOUNT_CREATED,
        definitions.TIME_DESCRIPTION_LAST_VISITED,
        definitions.TIME_DESCRIPTION_START,
        definitions.TIME_DESCRIPTION_LAST_SHUTDOWN,
        definitions.TIME_DESCRIPTION_LAST_LOGIN,
        definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET,
        definitions.TIME_DESCRIPTION_LAST_CONNECTED,
        definitions.TIME_DESCRIPTION_LAST_RUN,
        definitions.TIME_DESCRIPTION_LAST_PRINTED]:
      return '.A..'
    # Content modification.
    if event.timestamp_desc in [
        definitions.TIME_DESCRIPTION_MODIFICATION,
        definitions.TIME_DESCRIPTION_WRITTEN,
        definitions.TIME_DESCRIPTION_DELETED]:
      return 'M...'
    # Content creation time.
    if event.timestamp_desc in [
        definitions.TIME_DESCRIPTION_CREATION,
        definitions.TIME_DESCRIPTION_ADDED,
        definitions.TIME_DESCRIPTION_FILE_DOWNLOADED,
        definitions.TIME_DESCRIPTION_FIRST_CONNECTED]:
      return '...B'
    # Metadata modification.
    if event.timestamp_desc in [
        definitions.TIME_DESCRIPTION_CHANGE,
        definitions.TIME_DESCRIPTION_ENTRY_MODIFICATION]:
      return '..C.'
    return '....'
|
[
"def",
"GetMACBRepresentation",
"(",
"self",
",",
"event",
")",
":",
"data_type",
"=",
"getattr",
"(",
"event",
",",
"'data_type'",
",",
"None",
")",
"if",
"not",
"data_type",
":",
"return",
"'....'",
"# The filestat parser is somewhat limited.",
"if",
"data_type",
"==",
"'fs:stat'",
":",
"descriptions",
"=",
"event",
".",
"timestamp_desc",
".",
"split",
"(",
"';'",
")",
"return_characters",
"=",
"[",
"'.'",
",",
"'.'",
",",
"'.'",
",",
"'.'",
"]",
"for",
"description",
"in",
"descriptions",
":",
"if",
"description",
"in",
"(",
"'mtime'",
",",
"definitions",
".",
"TIME_DESCRIPTION_MODIFICATION",
")",
":",
"return_characters",
"[",
"0",
"]",
"=",
"'M'",
"elif",
"description",
"in",
"(",
"'atime'",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_ACCESS",
")",
":",
"return_characters",
"[",
"1",
"]",
"=",
"'A'",
"elif",
"description",
"in",
"(",
"'ctime'",
",",
"definitions",
".",
"TIME_DESCRIPTION_CHANGE",
")",
":",
"return_characters",
"[",
"2",
"]",
"=",
"'C'",
"elif",
"description",
"in",
"(",
"'crtime'",
",",
"definitions",
".",
"TIME_DESCRIPTION_CREATION",
")",
":",
"return_characters",
"[",
"3",
"]",
"=",
"'B'",
"return",
"''",
".",
"join",
"(",
"return_characters",
")",
"# Access time.",
"if",
"event",
".",
"timestamp_desc",
"in",
"[",
"definitions",
".",
"TIME_DESCRIPTION_LAST_ACCESS",
",",
"definitions",
".",
"TIME_DESCRIPTION_ACCOUNT_CREATED",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_VISITED",
",",
"definitions",
".",
"TIME_DESCRIPTION_START",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_SHUTDOWN",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_LOGIN",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_PASSWORD_RESET",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_CONNECTED",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_RUN",
",",
"definitions",
".",
"TIME_DESCRIPTION_LAST_PRINTED",
"]",
":",
"return",
"'.A..'",
"# Content modification.",
"if",
"event",
".",
"timestamp_desc",
"in",
"[",
"definitions",
".",
"TIME_DESCRIPTION_MODIFICATION",
",",
"definitions",
".",
"TIME_DESCRIPTION_WRITTEN",
",",
"definitions",
".",
"TIME_DESCRIPTION_DELETED",
"]",
":",
"return",
"'M...'",
"# Content creation time.",
"if",
"event",
".",
"timestamp_desc",
"in",
"[",
"definitions",
".",
"TIME_DESCRIPTION_CREATION",
",",
"definitions",
".",
"TIME_DESCRIPTION_ADDED",
",",
"definitions",
".",
"TIME_DESCRIPTION_FILE_DOWNLOADED",
",",
"definitions",
".",
"TIME_DESCRIPTION_FIRST_CONNECTED",
"]",
":",
"return",
"'...B'",
"# Metadata modification.",
"if",
"event",
".",
"timestamp_desc",
"in",
"[",
"definitions",
".",
"TIME_DESCRIPTION_CHANGE",
",",
"definitions",
".",
"TIME_DESCRIPTION_ENTRY_MODIFICATION",
"]",
":",
"return",
"'..C.'",
"return",
"'....'"
] |
Retrieves the MACB representation.
Args:
event (EventObject): event.
Returns:
str: MACB representation.
|
[
"Retrieves",
"the",
"MACB",
"representation",
"."
] |
python
|
train
|
shoebot/shoebot
|
lib/web/google.py
|
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/google.py#L212-L218
|
def search(q, start=0, wait=10, asynchronous=False, cached=False):
    """ Returns a Google web query formatted as a GoogleSearch list object.
    """
    # Plain web search uses no extra service argument (the empty string).
    return GoogleSearch(q, start, GOOGLE_SEARCH, "", wait, asynchronous, cached)
|
[
"def",
"search",
"(",
"q",
",",
"start",
"=",
"0",
",",
"wait",
"=",
"10",
",",
"asynchronous",
"=",
"False",
",",
"cached",
"=",
"False",
")",
":",
"service",
"=",
"GOOGLE_SEARCH",
"return",
"GoogleSearch",
"(",
"q",
",",
"start",
",",
"service",
",",
"\"\"",
",",
"wait",
",",
"asynchronous",
",",
"cached",
")"
] |
Returns a Google web query formatted as a GoogleSearch list object.
|
[
"Returns",
"a",
"Google",
"web",
"query",
"formatted",
"as",
"a",
"GoogleSearch",
"list",
"object",
"."
] |
python
|
valid
|
mitodl/edx-api-client
|
edx_api/ccx.py
|
https://github.com/mitodl/edx-api-client/blob/083fd23a48b3ef0d39602fc3e7e53ef02f4ad6d6/edx_api/ccx.py#L20-L55
|
def create(self, master_course_id, coach_email, max_students_allowed, title, modules=None):
    """
    Creates a CCX
    Args:
        master_course_id (str): edx course id of the master course
        coach_email (str): email of the user to make a coach. This user must exist on edx.
        max_students_allowed (int): Maximum number of students to allow in this ccx.
        title (str): Title of the CCX to be created
        modules (optional list): A list of locator_ids (str) for the modules to enable.
    Returns:
        ccx_id (str): The ID of the ccx.
    """
    payload = {
        'master_course_id': master_course_id,
        'coach_email': coach_email,
        'max_students_allowed': max_students_allowed,
        'display_name': title,
    }
    if modules is not None:
        payload['course_modules'] = modules
    resp = self.requester.post(
        parse.urljoin(self.base_url, '/api/ccx/v0/ccx/'),
        json=payload
    )
    try:
        resp.raise_for_status()
    except Exception:
        # Log the response body to aid debugging, then re-raise the original
        # error. Narrowed from a bare "except:" so SystemExit and
        # KeyboardInterrupt are no longer intercepted here.
        log.error(resp.json())
        raise
    return resp.json()['ccx_course_id']
|
[
"def",
"create",
"(",
"self",
",",
"master_course_id",
",",
"coach_email",
",",
"max_students_allowed",
",",
"title",
",",
"modules",
"=",
"None",
")",
":",
"payload",
"=",
"{",
"'master_course_id'",
":",
"master_course_id",
",",
"'coach_email'",
":",
"coach_email",
",",
"'max_students_allowed'",
":",
"max_students_allowed",
",",
"'display_name'",
":",
"title",
",",
"}",
"if",
"modules",
"is",
"not",
"None",
":",
"payload",
"[",
"'course_modules'",
"]",
"=",
"modules",
"resp",
"=",
"self",
".",
"requester",
".",
"post",
"(",
"parse",
".",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"'/api/ccx/v0/ccx/'",
")",
",",
"json",
"=",
"payload",
")",
"try",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"except",
":",
"log",
".",
"error",
"(",
"resp",
".",
"json",
"(",
")",
")",
"raise",
"return",
"resp",
".",
"json",
"(",
")",
"[",
"'ccx_course_id'",
"]"
] |
Creates a CCX
Args:
master_course_id (str): edx course id of the master course
coach_email (str): email of the user to make a coach. This user must exist on edx.
max_students_allowed (int): Maximum number of students to allow in this ccx.
title (str): Title of the CCX to be created
modules (optional list): A list of locator_ids (str) for the modules to enable.
Returns:
ccx_id (str): The ID of the ccx.
|
[
"Creates",
"a",
"CCX"
] |
python
|
train
|
thumbor-community/redis
|
tc_redis/storages/redis_storage.py
|
https://github.com/thumbor-community/redis/blob/e434c151b2d32b2209ce9935493258ee29fb1d1d/tc_redis/storages/redis_storage.py#L55-L75
|
def on_redis_error(self, fname, exc_type, exc_value):
'''Callback executed when there is a redis error.
:param string fname: Function name that was being called.
:param type exc_type: Exception type
:param Exception exc_value: The current exception
:returns: Default value or raise the current exception
'''
if self.shared_client:
Storage.storage = None
else:
self.storage = None
if self.context.config.REDIS_STORAGE_IGNORE_ERRORS is True:
logger.error("[REDIS_STORAGE] %s" % exc_value)
if fname == '_exists':
return False
return None
else:
raise exc_value
|
[
"def",
"on_redis_error",
"(",
"self",
",",
"fname",
",",
"exc_type",
",",
"exc_value",
")",
":",
"if",
"self",
".",
"shared_client",
":",
"Storage",
".",
"storage",
"=",
"None",
"else",
":",
"self",
".",
"storage",
"=",
"None",
"if",
"self",
".",
"context",
".",
"config",
".",
"REDIS_STORAGE_IGNORE_ERRORS",
"is",
"True",
":",
"logger",
".",
"error",
"(",
"\"[REDIS_STORAGE] %s\"",
"%",
"exc_value",
")",
"if",
"fname",
"==",
"'_exists'",
":",
"return",
"False",
"return",
"None",
"else",
":",
"raise",
"exc_value"
] |
Callback executed when there is a redis error.
:param string fname: Function name that was being called.
:param type exc_type: Exception type
:param Exception exc_value: The current exception
:returns: Default value or raise the current exception
|
[
"Callback",
"executed",
"when",
"there",
"is",
"a",
"redis",
"error",
"."
] |
python
|
train
|
librosa/librosa
|
librosa/util/matching.py
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L63-L113
|
def __match_intervals(intervals_from, intervals_to, strict=True): # pragma: no cover
'''Numba-accelerated interval matching algorithm.
'''
# sort index of the interval starts
start_index = np.argsort(intervals_to[:, 0])
# sort index of the interval ends
end_index = np.argsort(intervals_to[:, 1])
# and sorted values of starts
start_sorted = intervals_to[start_index, 0]
# and ends
end_sorted = intervals_to[end_index, 1]
search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')
output = np.empty(len(intervals_from), dtype=numba.uint32)
for i in range(len(intervals_from)):
query = intervals_from[i]
# Find the intervals that start after our query ends
after_query = search_ends[i]
# And the intervals that end after our query begins
before_query = search_starts[i]
# Candidates for overlapping have to (end after we start) and (begin before we end)
candidates = set(start_index[:after_query]) & set(end_index[before_query:])
# Proceed as before
if len(candidates) > 0:
output[i] = __match_interval_overlaps(query, intervals_to, candidates)
elif strict:
# Numba only lets us use compile-time constants in exception messages
raise ParameterError
else:
# Find the closest interval
# (start_index[after_query] - query[1]) is the distance to the next interval
# (query[0] - end_index[before_query])
dist_before = np.inf
dist_after = np.inf
if search_starts[i] > 0:
dist_before = query[0] - end_sorted[search_starts[i]-1]
if search_ends[i] + 1 < len(intervals_to):
dist_after = start_sorted[search_ends[i]+1] - query[1]
if dist_before < dist_after:
output[i] = end_index[search_starts[i]-1]
else:
output[i] = start_index[search_ends[i]+1]
return output
|
[
"def",
"__match_intervals",
"(",
"intervals_from",
",",
"intervals_to",
",",
"strict",
"=",
"True",
")",
":",
"# pragma: no cover",
"# sort index of the interval starts",
"start_index",
"=",
"np",
".",
"argsort",
"(",
"intervals_to",
"[",
":",
",",
"0",
"]",
")",
"# sort index of the interval ends",
"end_index",
"=",
"np",
".",
"argsort",
"(",
"intervals_to",
"[",
":",
",",
"1",
"]",
")",
"# and sorted values of starts",
"start_sorted",
"=",
"intervals_to",
"[",
"start_index",
",",
"0",
"]",
"# and ends",
"end_sorted",
"=",
"intervals_to",
"[",
"end_index",
",",
"1",
"]",
"search_ends",
"=",
"np",
".",
"searchsorted",
"(",
"start_sorted",
",",
"intervals_from",
"[",
":",
",",
"1",
"]",
",",
"side",
"=",
"'right'",
")",
"search_starts",
"=",
"np",
".",
"searchsorted",
"(",
"end_sorted",
",",
"intervals_from",
"[",
":",
",",
"0",
"]",
",",
"side",
"=",
"'left'",
")",
"output",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"intervals_from",
")",
",",
"dtype",
"=",
"numba",
".",
"uint32",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"intervals_from",
")",
")",
":",
"query",
"=",
"intervals_from",
"[",
"i",
"]",
"# Find the intervals that start after our query ends",
"after_query",
"=",
"search_ends",
"[",
"i",
"]",
"# And the intervals that end after our query begins",
"before_query",
"=",
"search_starts",
"[",
"i",
"]",
"# Candidates for overlapping have to (end after we start) and (begin before we end)",
"candidates",
"=",
"set",
"(",
"start_index",
"[",
":",
"after_query",
"]",
")",
"&",
"set",
"(",
"end_index",
"[",
"before_query",
":",
"]",
")",
"# Proceed as before",
"if",
"len",
"(",
"candidates",
")",
">",
"0",
":",
"output",
"[",
"i",
"]",
"=",
"__match_interval_overlaps",
"(",
"query",
",",
"intervals_to",
",",
"candidates",
")",
"elif",
"strict",
":",
"# Numba only lets us use compile-time constants in exception messages",
"raise",
"ParameterError",
"else",
":",
"# Find the closest interval",
"# (start_index[after_query] - query[1]) is the distance to the next interval",
"# (query[0] - end_index[before_query])",
"dist_before",
"=",
"np",
".",
"inf",
"dist_after",
"=",
"np",
".",
"inf",
"if",
"search_starts",
"[",
"i",
"]",
">",
"0",
":",
"dist_before",
"=",
"query",
"[",
"0",
"]",
"-",
"end_sorted",
"[",
"search_starts",
"[",
"i",
"]",
"-",
"1",
"]",
"if",
"search_ends",
"[",
"i",
"]",
"+",
"1",
"<",
"len",
"(",
"intervals_to",
")",
":",
"dist_after",
"=",
"start_sorted",
"[",
"search_ends",
"[",
"i",
"]",
"+",
"1",
"]",
"-",
"query",
"[",
"1",
"]",
"if",
"dist_before",
"<",
"dist_after",
":",
"output",
"[",
"i",
"]",
"=",
"end_index",
"[",
"search_starts",
"[",
"i",
"]",
"-",
"1",
"]",
"else",
":",
"output",
"[",
"i",
"]",
"=",
"start_index",
"[",
"search_ends",
"[",
"i",
"]",
"+",
"1",
"]",
"return",
"output"
] |
Numba-accelerated interval matching algorithm.
|
[
"Numba",
"-",
"accelerated",
"interval",
"matching",
"algorithm",
"."
] |
python
|
test
|
wmayner/pyphi
|
pyphi/models/actual_causation.py
|
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/models/actual_causation.py#L254-L257
|
def irreducible_effects(self):
"""The set of irreducible effects in this |Account|."""
return tuple(link for link in self
if link.direction is Direction.EFFECT)
|
[
"def",
"irreducible_effects",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"link",
"for",
"link",
"in",
"self",
"if",
"link",
".",
"direction",
"is",
"Direction",
".",
"EFFECT",
")"
] |
The set of irreducible effects in this |Account|.
|
[
"The",
"set",
"of",
"irreducible",
"effects",
"in",
"this",
"|Account|",
"."
] |
python
|
train
|
wonambi-python/wonambi
|
wonambi/trans/select.py
|
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/trans/select.py#L529-L607
|
def _concat(bundles, cat=(0, 0, 0, 0)):
"""Prepare event or epoch start and end times for concatenation."""
chan = sorted(set([x['chan'] for x in bundles]))
cycle = sorted(set([x['cycle'] for x in bundles]))
stage = sorted(set([x['stage'] for x in bundles]))
evt_type = sorted(set([x['name'] for x in bundles]))
all_cycle = None
all_stage = None
all_evt_type = None
if cycle[0] is not None:
all_cycle = ', '.join([str(c) for c in cycle])
if stage[0] is not None:
all_stage = ', '.join(stage)
if evt_type[0] is not None:
all_evt_type = ', '.join(evt_type)
if cat[0]:
cycle = [all_cycle]
if cat[1]:
stage = [all_stage]
if cat[3]:
evt_type = [all_evt_type]
to_concat = []
for ch in chan:
for cyc in cycle:
for st in stage:
for et in evt_type:
new_times = []
for bund in bundles:
chan_cond = ch == bund['chan']
cyc_cond = cyc in (bund['cycle'], all_cycle)
st_cond = st in (bund['stage'], all_stage)
et_cond = et in (bund['name'], all_evt_type)
if chan_cond and cyc_cond and st_cond and et_cond:
new_times.extend(bund['times'])
new_times = sorted(new_times, key=lambda x: x[0])
new_bund = {'times': new_times,
'chan': ch,
'cycle': cyc,
'stage': st,
'name': et
}
to_concat.append(new_bund)
if not cat[2]:
to_concat_new = []
for bund in to_concat:
last = None
bund['times'].append((inf,inf))
start = 0
for i, j in enumerate(bund['times']):
if last is not None:
if not isclose(j[0], last, abs_tol=0.01):
new_times = bund['times'][start:i]
new_bund = bund.copy()
new_bund['times'] = new_times
to_concat_new.append(new_bund)
start = i
last = j[1]
to_concat = to_concat_new
to_concat = [x for x in to_concat if x['times']]
return to_concat
|
[
"def",
"_concat",
"(",
"bundles",
",",
"cat",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
")",
":",
"chan",
"=",
"sorted",
"(",
"set",
"(",
"[",
"x",
"[",
"'chan'",
"]",
"for",
"x",
"in",
"bundles",
"]",
")",
")",
"cycle",
"=",
"sorted",
"(",
"set",
"(",
"[",
"x",
"[",
"'cycle'",
"]",
"for",
"x",
"in",
"bundles",
"]",
")",
")",
"stage",
"=",
"sorted",
"(",
"set",
"(",
"[",
"x",
"[",
"'stage'",
"]",
"for",
"x",
"in",
"bundles",
"]",
")",
")",
"evt_type",
"=",
"sorted",
"(",
"set",
"(",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"bundles",
"]",
")",
")",
"all_cycle",
"=",
"None",
"all_stage",
"=",
"None",
"all_evt_type",
"=",
"None",
"if",
"cycle",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"all_cycle",
"=",
"', '",
".",
"join",
"(",
"[",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"cycle",
"]",
")",
"if",
"stage",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"all_stage",
"=",
"', '",
".",
"join",
"(",
"stage",
")",
"if",
"evt_type",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"all_evt_type",
"=",
"', '",
".",
"join",
"(",
"evt_type",
")",
"if",
"cat",
"[",
"0",
"]",
":",
"cycle",
"=",
"[",
"all_cycle",
"]",
"if",
"cat",
"[",
"1",
"]",
":",
"stage",
"=",
"[",
"all_stage",
"]",
"if",
"cat",
"[",
"3",
"]",
":",
"evt_type",
"=",
"[",
"all_evt_type",
"]",
"to_concat",
"=",
"[",
"]",
"for",
"ch",
"in",
"chan",
":",
"for",
"cyc",
"in",
"cycle",
":",
"for",
"st",
"in",
"stage",
":",
"for",
"et",
"in",
"evt_type",
":",
"new_times",
"=",
"[",
"]",
"for",
"bund",
"in",
"bundles",
":",
"chan_cond",
"=",
"ch",
"==",
"bund",
"[",
"'chan'",
"]",
"cyc_cond",
"=",
"cyc",
"in",
"(",
"bund",
"[",
"'cycle'",
"]",
",",
"all_cycle",
")",
"st_cond",
"=",
"st",
"in",
"(",
"bund",
"[",
"'stage'",
"]",
",",
"all_stage",
")",
"et_cond",
"=",
"et",
"in",
"(",
"bund",
"[",
"'name'",
"]",
",",
"all_evt_type",
")",
"if",
"chan_cond",
"and",
"cyc_cond",
"and",
"st_cond",
"and",
"et_cond",
":",
"new_times",
".",
"extend",
"(",
"bund",
"[",
"'times'",
"]",
")",
"new_times",
"=",
"sorted",
"(",
"new_times",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
"new_bund",
"=",
"{",
"'times'",
":",
"new_times",
",",
"'chan'",
":",
"ch",
",",
"'cycle'",
":",
"cyc",
",",
"'stage'",
":",
"st",
",",
"'name'",
":",
"et",
"}",
"to_concat",
".",
"append",
"(",
"new_bund",
")",
"if",
"not",
"cat",
"[",
"2",
"]",
":",
"to_concat_new",
"=",
"[",
"]",
"for",
"bund",
"in",
"to_concat",
":",
"last",
"=",
"None",
"bund",
"[",
"'times'",
"]",
".",
"append",
"(",
"(",
"inf",
",",
"inf",
")",
")",
"start",
"=",
"0",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"bund",
"[",
"'times'",
"]",
")",
":",
"if",
"last",
"is",
"not",
"None",
":",
"if",
"not",
"isclose",
"(",
"j",
"[",
"0",
"]",
",",
"last",
",",
"abs_tol",
"=",
"0.01",
")",
":",
"new_times",
"=",
"bund",
"[",
"'times'",
"]",
"[",
"start",
":",
"i",
"]",
"new_bund",
"=",
"bund",
".",
"copy",
"(",
")",
"new_bund",
"[",
"'times'",
"]",
"=",
"new_times",
"to_concat_new",
".",
"append",
"(",
"new_bund",
")",
"start",
"=",
"i",
"last",
"=",
"j",
"[",
"1",
"]",
"to_concat",
"=",
"to_concat_new",
"to_concat",
"=",
"[",
"x",
"for",
"x",
"in",
"to_concat",
"if",
"x",
"[",
"'times'",
"]",
"]",
"return",
"to_concat"
] |
Prepare event or epoch start and end times for concatenation.
|
[
"Prepare",
"event",
"or",
"epoch",
"start",
"and",
"end",
"times",
"for",
"concatenation",
"."
] |
python
|
train
|
tensorpack/tensorpack
|
examples/FasterRCNN/model_fpn.py
|
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L166-L219
|
def generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d):
"""
Args:
multilevel_pred_boxes: #lvl HxWxAx4 boxes
multilevel_label_logits: #lvl tensors of shape HxWxA
Returns:
boxes: kx4 float
scores: k logits
"""
num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
assert len(multilevel_pred_boxes) == num_lvl
assert len(multilevel_label_logits) == num_lvl
training = get_current_tower_context().is_training
all_boxes = []
all_scores = []
if cfg.FPN.PROPOSAL_MODE == 'Level':
fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK
for lvl in range(num_lvl):
with tf.name_scope('Lvl{}'.format(lvl + 2)):
pred_boxes_decoded = multilevel_pred_boxes[lvl]
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(multilevel_label_logits[lvl], [-1]),
image_shape2d, fpn_nms_topk)
all_boxes.append(proposal_boxes)
all_scores.append(proposal_scores)
proposal_boxes = tf.concat(all_boxes, axis=0) # nx4
proposal_scores = tf.concat(all_scores, axis=0) # n
# Here we are different from Detectron.
# Detectron picks top-k within the batch, rather than within an image. However we do not have a batch.
proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk)
proposal_scores, topk_indices = tf.nn.top_k(proposal_scores, k=proposal_topk, sorted=False)
proposal_boxes = tf.gather(proposal_boxes, topk_indices)
else:
for lvl in range(num_lvl):
with tf.name_scope('Lvl{}'.format(lvl + 2)):
pred_boxes_decoded = multilevel_pred_boxes[lvl]
all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4]))
all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1]))
all_boxes = tf.concat(all_boxes, axis=0)
all_scores = tf.concat(all_scores, axis=0)
proposal_boxes, proposal_scores = generate_rpn_proposals(
all_boxes, all_scores, image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK)
tf.sigmoid(proposal_scores, name='probs') # for visualization
return tf.stop_gradient(proposal_boxes, name='boxes'), \
tf.stop_gradient(proposal_scores, name='scores')
|
[
"def",
"generate_fpn_proposals",
"(",
"multilevel_pred_boxes",
",",
"multilevel_label_logits",
",",
"image_shape2d",
")",
":",
"num_lvl",
"=",
"len",
"(",
"cfg",
".",
"FPN",
".",
"ANCHOR_STRIDES",
")",
"assert",
"len",
"(",
"multilevel_pred_boxes",
")",
"==",
"num_lvl",
"assert",
"len",
"(",
"multilevel_label_logits",
")",
"==",
"num_lvl",
"training",
"=",
"get_current_tower_context",
"(",
")",
".",
"is_training",
"all_boxes",
"=",
"[",
"]",
"all_scores",
"=",
"[",
"]",
"if",
"cfg",
".",
"FPN",
".",
"PROPOSAL_MODE",
"==",
"'Level'",
":",
"fpn_nms_topk",
"=",
"cfg",
".",
"RPN",
".",
"TRAIN_PER_LEVEL_NMS_TOPK",
"if",
"training",
"else",
"cfg",
".",
"RPN",
".",
"TEST_PER_LEVEL_NMS_TOPK",
"for",
"lvl",
"in",
"range",
"(",
"num_lvl",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'Lvl{}'",
".",
"format",
"(",
"lvl",
"+",
"2",
")",
")",
":",
"pred_boxes_decoded",
"=",
"multilevel_pred_boxes",
"[",
"lvl",
"]",
"proposal_boxes",
",",
"proposal_scores",
"=",
"generate_rpn_proposals",
"(",
"tf",
".",
"reshape",
"(",
"pred_boxes_decoded",
",",
"[",
"-",
"1",
",",
"4",
"]",
")",
",",
"tf",
".",
"reshape",
"(",
"multilevel_label_logits",
"[",
"lvl",
"]",
",",
"[",
"-",
"1",
"]",
")",
",",
"image_shape2d",
",",
"fpn_nms_topk",
")",
"all_boxes",
".",
"append",
"(",
"proposal_boxes",
")",
"all_scores",
".",
"append",
"(",
"proposal_scores",
")",
"proposal_boxes",
"=",
"tf",
".",
"concat",
"(",
"all_boxes",
",",
"axis",
"=",
"0",
")",
"# nx4",
"proposal_scores",
"=",
"tf",
".",
"concat",
"(",
"all_scores",
",",
"axis",
"=",
"0",
")",
"# n",
"# Here we are different from Detectron.",
"# Detectron picks top-k within the batch, rather than within an image. However we do not have a batch.",
"proposal_topk",
"=",
"tf",
".",
"minimum",
"(",
"tf",
".",
"size",
"(",
"proposal_scores",
")",
",",
"fpn_nms_topk",
")",
"proposal_scores",
",",
"topk_indices",
"=",
"tf",
".",
"nn",
".",
"top_k",
"(",
"proposal_scores",
",",
"k",
"=",
"proposal_topk",
",",
"sorted",
"=",
"False",
")",
"proposal_boxes",
"=",
"tf",
".",
"gather",
"(",
"proposal_boxes",
",",
"topk_indices",
")",
"else",
":",
"for",
"lvl",
"in",
"range",
"(",
"num_lvl",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"'Lvl{}'",
".",
"format",
"(",
"lvl",
"+",
"2",
")",
")",
":",
"pred_boxes_decoded",
"=",
"multilevel_pred_boxes",
"[",
"lvl",
"]",
"all_boxes",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"pred_boxes_decoded",
",",
"[",
"-",
"1",
",",
"4",
"]",
")",
")",
"all_scores",
".",
"append",
"(",
"tf",
".",
"reshape",
"(",
"multilevel_label_logits",
"[",
"lvl",
"]",
",",
"[",
"-",
"1",
"]",
")",
")",
"all_boxes",
"=",
"tf",
".",
"concat",
"(",
"all_boxes",
",",
"axis",
"=",
"0",
")",
"all_scores",
"=",
"tf",
".",
"concat",
"(",
"all_scores",
",",
"axis",
"=",
"0",
")",
"proposal_boxes",
",",
"proposal_scores",
"=",
"generate_rpn_proposals",
"(",
"all_boxes",
",",
"all_scores",
",",
"image_shape2d",
",",
"cfg",
".",
"RPN",
".",
"TRAIN_PRE_NMS_TOPK",
"if",
"training",
"else",
"cfg",
".",
"RPN",
".",
"TEST_PRE_NMS_TOPK",
",",
"cfg",
".",
"RPN",
".",
"TRAIN_POST_NMS_TOPK",
"if",
"training",
"else",
"cfg",
".",
"RPN",
".",
"TEST_POST_NMS_TOPK",
")",
"tf",
".",
"sigmoid",
"(",
"proposal_scores",
",",
"name",
"=",
"'probs'",
")",
"# for visualization",
"return",
"tf",
".",
"stop_gradient",
"(",
"proposal_boxes",
",",
"name",
"=",
"'boxes'",
")",
",",
"tf",
".",
"stop_gradient",
"(",
"proposal_scores",
",",
"name",
"=",
"'scores'",
")"
] |
Args:
multilevel_pred_boxes: #lvl HxWxAx4 boxes
multilevel_label_logits: #lvl tensors of shape HxWxA
Returns:
boxes: kx4 float
scores: k logits
|
[
"Args",
":",
"multilevel_pred_boxes",
":",
"#lvl",
"HxWxAx4",
"boxes",
"multilevel_label_logits",
":",
"#lvl",
"tensors",
"of",
"shape",
"HxWxA"
] |
python
|
train
|
jobovy/galpy
|
galpy/orbit/OrbitTop.py
|
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L1245-L1304
|
def _setupaA(self,pot=None,type='staeckel',**kwargs):
"""
NAME:
_setupaA
PURPOSE:
set up an actionAngle module for this Orbit
INPUT:
pot - potential
type= ('staeckel') type of actionAngle module to use
1) 'adiabatic'
2) 'staeckel'
3) 'isochroneApprox'
4) 'spherical'
OUTPUT:
HISTORY:
2010-11-30 - Written - Bovy (NYU)
2013-11-27 - Re-written in terms of new actionAngle modules - Bovy (IAS)
2017-12-25 - Changed default method to 'staeckel' and automatic delta estimation - Bovy (UofT)
"""
if hasattr(self,'_aA'):
if not self._resetaA(pot=pot,type=type): return None
if pot is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
self._aAPot= pot
self._aAType= type
#Setup
if self._aAType.lower() == 'adiabatic':
self._aA= actionAngle.actionAngleAdiabatic(pot=self._aAPot,
**kwargs)
elif self._aAType.lower() == 'staeckel':
try:
delta= \
kwargs.pop('delta',
actionAngle.estimateDeltaStaeckel(\
self._aAPot,self.R(use_physical=False),
self.z(use_physical=False)+(nu.fabs(self.z(use_physical=False)) < 1e-8) * (2.*(self.z(use_physical=False) >= 0)-1.)*1e-10)) # try to make sure this is not 0
except PotentialError as e:
if 'deriv' in str(e):
raise PotentialError('Automagic calculation of delta parameter for Staeckel approximation failed because the necessary second derivatives of the given potential are not implemented; set delta= explicitly')
elif 'non-axi' in str(e):
raise PotentialError('Automagic calculation of delta parameter for Staeckel approximation failed because the given potential is not axisymmetric; pass an axisymmetric potential instead')
else: #pragma: no cover
raise
if delta < 1e-6:
self._setupaA(pot=pot,type='spherical')
else:
self._aA= actionAngle.actionAngleStaeckel(pot=self._aAPot,
delta=delta,
**kwargs)
elif self._aAType.lower() == 'isochroneapprox':
from galpy.actionAngle import actionAngleIsochroneApprox
self._aA= actionAngleIsochroneApprox(pot=self._aAPot,
**kwargs)
elif self._aAType.lower() == 'spherical':
self._aA= actionAngle.actionAngleSpherical(pot=self._aAPot,
**kwargs)
return None
|
[
"def",
"_setupaA",
"(",
"self",
",",
"pot",
"=",
"None",
",",
"type",
"=",
"'staeckel'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_aA'",
")",
":",
"if",
"not",
"self",
".",
"_resetaA",
"(",
"pot",
"=",
"pot",
",",
"type",
"=",
"type",
")",
":",
"return",
"None",
"if",
"pot",
"is",
"None",
":",
"try",
":",
"pot",
"=",
"self",
".",
"_pot",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"Integrate orbit or specify pot=\"",
")",
"self",
".",
"_aAPot",
"=",
"pot",
"self",
".",
"_aAType",
"=",
"type",
"#Setup",
"if",
"self",
".",
"_aAType",
".",
"lower",
"(",
")",
"==",
"'adiabatic'",
":",
"self",
".",
"_aA",
"=",
"actionAngle",
".",
"actionAngleAdiabatic",
"(",
"pot",
"=",
"self",
".",
"_aAPot",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"_aAType",
".",
"lower",
"(",
")",
"==",
"'staeckel'",
":",
"try",
":",
"delta",
"=",
"kwargs",
".",
"pop",
"(",
"'delta'",
",",
"actionAngle",
".",
"estimateDeltaStaeckel",
"(",
"self",
".",
"_aAPot",
",",
"self",
".",
"R",
"(",
"use_physical",
"=",
"False",
")",
",",
"self",
".",
"z",
"(",
"use_physical",
"=",
"False",
")",
"+",
"(",
"nu",
".",
"fabs",
"(",
"self",
".",
"z",
"(",
"use_physical",
"=",
"False",
")",
")",
"<",
"1e-8",
")",
"*",
"(",
"2.",
"*",
"(",
"self",
".",
"z",
"(",
"use_physical",
"=",
"False",
")",
">=",
"0",
")",
"-",
"1.",
")",
"*",
"1e-10",
")",
")",
"# try to make sure this is not 0",
"except",
"PotentialError",
"as",
"e",
":",
"if",
"'deriv'",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"PotentialError",
"(",
"'Automagic calculation of delta parameter for Staeckel approximation failed because the necessary second derivatives of the given potential are not implemented; set delta= explicitly'",
")",
"elif",
"'non-axi'",
"in",
"str",
"(",
"e",
")",
":",
"raise",
"PotentialError",
"(",
"'Automagic calculation of delta parameter for Staeckel approximation failed because the given potential is not axisymmetric; pass an axisymmetric potential instead'",
")",
"else",
":",
"#pragma: no cover",
"raise",
"if",
"delta",
"<",
"1e-6",
":",
"self",
".",
"_setupaA",
"(",
"pot",
"=",
"pot",
",",
"type",
"=",
"'spherical'",
")",
"else",
":",
"self",
".",
"_aA",
"=",
"actionAngle",
".",
"actionAngleStaeckel",
"(",
"pot",
"=",
"self",
".",
"_aAPot",
",",
"delta",
"=",
"delta",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"_aAType",
".",
"lower",
"(",
")",
"==",
"'isochroneapprox'",
":",
"from",
"galpy",
".",
"actionAngle",
"import",
"actionAngleIsochroneApprox",
"self",
".",
"_aA",
"=",
"actionAngleIsochroneApprox",
"(",
"pot",
"=",
"self",
".",
"_aAPot",
",",
"*",
"*",
"kwargs",
")",
"elif",
"self",
".",
"_aAType",
".",
"lower",
"(",
")",
"==",
"'spherical'",
":",
"self",
".",
"_aA",
"=",
"actionAngle",
".",
"actionAngleSpherical",
"(",
"pot",
"=",
"self",
".",
"_aAPot",
",",
"*",
"*",
"kwargs",
")",
"return",
"None"
] |
NAME:
_setupaA
PURPOSE:
set up an actionAngle module for this Orbit
INPUT:
pot - potential
type= ('staeckel') type of actionAngle module to use
1) 'adiabatic'
2) 'staeckel'
3) 'isochroneApprox'
4) 'spherical'
OUTPUT:
HISTORY:
2010-11-30 - Written - Bovy (NYU)
2013-11-27 - Re-written in terms of new actionAngle modules - Bovy (IAS)
2017-12-25 - Changed default method to 'staeckel' and automatic delta estimation - Bovy (UofT)
|
[
"NAME",
":",
"_setupaA",
"PURPOSE",
":",
"set",
"up",
"an",
"actionAngle",
"module",
"for",
"this",
"Orbit",
"INPUT",
":",
"pot",
"-",
"potential",
"type",
"=",
"(",
"staeckel",
")",
"type",
"of",
"actionAngle",
"module",
"to",
"use",
"1",
")",
"adiabatic",
"2",
")",
"staeckel",
"3",
")",
"isochroneApprox",
"4",
")",
"spherical",
"OUTPUT",
":",
"HISTORY",
":",
"2010",
"-",
"11",
"-",
"30",
"-",
"Written",
"-",
"Bovy",
"(",
"NYU",
")",
"2013",
"-",
"11",
"-",
"27",
"-",
"Re",
"-",
"written",
"in",
"terms",
"of",
"new",
"actionAngle",
"modules",
"-",
"Bovy",
"(",
"IAS",
")",
"2017",
"-",
"12",
"-",
"25",
"-",
"Changed",
"default",
"method",
"to",
"staeckel",
"and",
"automatic",
"delta",
"estimation",
"-",
"Bovy",
"(",
"UofT",
")"
] |
python
|
train
|
snare/scruffy
|
scruffy/state.py
|
https://github.com/snare/scruffy/blob/0fedc08cfdb6db927ff93c09f25f24ce5a04c541/scruffy/state.py#L54-L59
|
def save(self):
"""
Save the state to a file.
"""
with open(self.path, 'w') as f:
f.write(yaml.dump(dict(self.d)))
|
[
"def",
"save",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"yaml",
".",
"dump",
"(",
"dict",
"(",
"self",
".",
"d",
")",
")",
")"
] |
Save the state to a file.
|
[
"Save",
"the",
"state",
"to",
"a",
"file",
"."
] |
python
|
test
|
fermiPy/fermipy
|
fermipy/diffuse/diffuse_src_manager.py
|
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/diffuse_src_manager.py#L134-L152
|
def make_ring_filelist(self, sourcekeys, rings, galprop_run):
""" Make a list of all the template files for a merged component
Parameters
----------
sourcekeys : list-like of str
The names of the componenents to merge
rings : list-like of int
The indices of the rings to merge
galprop_run : str
String identifying the galprop parameters
"""
flist = []
for sourcekey in sourcekeys:
for ring in rings:
flist += [self.make_ring_filename(sourcekey,
ring, galprop_run)]
return flist
|
[
"def",
"make_ring_filelist",
"(",
"self",
",",
"sourcekeys",
",",
"rings",
",",
"galprop_run",
")",
":",
"flist",
"=",
"[",
"]",
"for",
"sourcekey",
"in",
"sourcekeys",
":",
"for",
"ring",
"in",
"rings",
":",
"flist",
"+=",
"[",
"self",
".",
"make_ring_filename",
"(",
"sourcekey",
",",
"ring",
",",
"galprop_run",
")",
"]",
"return",
"flist"
] |
Make a list of all the template files for a merged component
Parameters
----------
sourcekeys : list-like of str
The names of the componenents to merge
rings : list-like of int
The indices of the rings to merge
galprop_run : str
String identifying the galprop parameters
|
[
"Make",
"a",
"list",
"of",
"all",
"the",
"template",
"files",
"for",
"a",
"merged",
"component"
] |
python
|
train
|
monarch-initiative/dipper
|
dipper/sources/MGI.py
|
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L1343-L1429
|
def _process_mrk_marker_view(self, limit):
"""
This is the definition of markers
(as in genes, but other genomic loci types as well).
It looks up the identifiers in the hashmap
This includes their labels, specific class, and identifiers
TODO should we use the mrk_mouse_view instead?
Triples:
<marker_id> a owl:Class OR owl:NamedIndividual
GENO:marker_type
rdf:label <symbol>
RO:in_taxon <NCBITaxon_id>
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_counter = 0
raw = '/'.join((self.rawdir, 'mrk_marker_view'))
LOG.info("getting markers and assigning types")
with open(raw, 'r') as f:
f.readline() # read the header row; skip
for line in f:
line = line.rstrip("\n")
line_counter += 1
(marker_key, organism_key, marker_status_key,
symbol, name, latin_name, marker_type) = line.split('\t')
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# use only non-withdrawn markers
if marker_status_key != '2':
marker_id = self.idhash['marker'].get(marker_key)
# only pull info for mouse genes for now
# other species should come from other dbs
if organism_key != '1':
continue
if marker_id is None:
LOG.error(
"can't find %s %s in the id hash", marker_key, symbol)
mapped_marker_type = self.resolve(marker_type.strip())
# if it's unlocated, or is not a gene,
# then don't add it as a class because
# it's not added as a gene.
# everything except for genes are modeled as individuals
if mapped_marker_type in [
self.globaltt['gene'],
self.globaltt['pseudogene']]:
model.addClassToGraph(
marker_id, symbol, mapped_marker_type, name)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym'])
self.markers['classes'].append(marker_id)
else:
model.addIndividualToGraph(
marker_id, symbol, mapped_marker_type, name)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym'])
self.markers['indiv'].append(marker_id)
self.label_hash[marker_id] = symbol
# add the taxon
taxon_id = self.resolve(latin_name) # not always proper binomial
geno.addTaxon(taxon_id, marker_id)
# make MGI the leader for mouse genes.
if taxon_id == self.globaltt['Mus musculus']:
model.makeLeader(marker_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
return
|
[
"def",
"_process_mrk_marker_view",
"(",
"self",
",",
"limit",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"model",
"=",
"Model",
"(",
"graph",
")",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"line_counter",
"=",
"0",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"'mrk_marker_view'",
")",
")",
"LOG",
".",
"info",
"(",
"\"getting markers and assigning types\"",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
")",
"as",
"f",
":",
"f",
".",
"readline",
"(",
")",
"# read the header row; skip",
"for",
"line",
"in",
"f",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
"\"\\n\"",
")",
"line_counter",
"+=",
"1",
"(",
"marker_key",
",",
"organism_key",
",",
"marker_status_key",
",",
"symbol",
",",
"name",
",",
"latin_name",
",",
"marker_type",
")",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"if",
"self",
".",
"test_mode",
"is",
"True",
":",
"if",
"int",
"(",
"marker_key",
")",
"not",
"in",
"self",
".",
"test_keys",
".",
"get",
"(",
"'marker'",
")",
":",
"continue",
"# use only non-withdrawn markers",
"if",
"marker_status_key",
"!=",
"'2'",
":",
"marker_id",
"=",
"self",
".",
"idhash",
"[",
"'marker'",
"]",
".",
"get",
"(",
"marker_key",
")",
"# only pull info for mouse genes for now",
"# other species should come from other dbs",
"if",
"organism_key",
"!=",
"'1'",
":",
"continue",
"if",
"marker_id",
"is",
"None",
":",
"LOG",
".",
"error",
"(",
"\"can't find %s %s in the id hash\"",
",",
"marker_key",
",",
"symbol",
")",
"mapped_marker_type",
"=",
"self",
".",
"resolve",
"(",
"marker_type",
".",
"strip",
"(",
")",
")",
"# if it's unlocated, or is not a gene,",
"# then don't add it as a class because",
"# it's not added as a gene.",
"# everything except for genes are modeled as individuals",
"if",
"mapped_marker_type",
"in",
"[",
"self",
".",
"globaltt",
"[",
"'gene'",
"]",
",",
"self",
".",
"globaltt",
"[",
"'pseudogene'",
"]",
"]",
":",
"model",
".",
"addClassToGraph",
"(",
"marker_id",
",",
"symbol",
",",
"mapped_marker_type",
",",
"name",
")",
"model",
".",
"addSynonym",
"(",
"marker_id",
",",
"name",
",",
"self",
".",
"globaltt",
"[",
"'has_exact_synonym'",
"]",
")",
"self",
".",
"markers",
"[",
"'classes'",
"]",
".",
"append",
"(",
"marker_id",
")",
"else",
":",
"model",
".",
"addIndividualToGraph",
"(",
"marker_id",
",",
"symbol",
",",
"mapped_marker_type",
",",
"name",
")",
"model",
".",
"addSynonym",
"(",
"marker_id",
",",
"name",
",",
"self",
".",
"globaltt",
"[",
"'has_exact_synonym'",
"]",
")",
"self",
".",
"markers",
"[",
"'indiv'",
"]",
".",
"append",
"(",
"marker_id",
")",
"self",
".",
"label_hash",
"[",
"marker_id",
"]",
"=",
"symbol",
"# add the taxon",
"taxon_id",
"=",
"self",
".",
"resolve",
"(",
"latin_name",
")",
"# not always proper binomial",
"geno",
".",
"addTaxon",
"(",
"taxon_id",
",",
"marker_id",
")",
"# make MGI the leader for mouse genes.",
"if",
"taxon_id",
"==",
"self",
".",
"globaltt",
"[",
"'Mus musculus'",
"]",
":",
"model",
".",
"makeLeader",
"(",
"marker_id",
")",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"return"
] |
This is the definition of markers
(as in genes, but other genomic loci types as well).
It looks up the identifiers in the hashmap
This includes their labels, specific class, and identifiers
TODO should we use the mrk_mouse_view instead?
Triples:
<marker_id> a owl:Class OR owl:NamedIndividual
GENO:marker_type
rdf:label <symbol>
RO:in_taxon <NCBITaxon_id>
:param limit:
:return:
|
[
"This",
"is",
"the",
"definition",
"of",
"markers",
"(",
"as",
"in",
"genes",
"but",
"other",
"genomic",
"loci",
"types",
"as",
"well",
")",
".",
"It",
"looks",
"up",
"the",
"identifiers",
"in",
"the",
"hashmap",
"This",
"includes",
"their",
"labels",
"specific",
"class",
"and",
"identifiers",
"TODO",
"should",
"we",
"use",
"the",
"mrk_mouse_view",
"instead?"
] |
python
|
train
|
atlassian-api/atlassian-python-api
|
atlassian/jira.py
|
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L758-L761
|
def delete_issue_link_type(self, issue_link_type_id):
"""Delete the specified issue link type."""
url = 'rest/api/2/issueLinkType/{issueLinkTypeId}'.format(issueLinkTypeId=issue_link_type_id)
return self.delete(url)
|
[
"def",
"delete_issue_link_type",
"(",
"self",
",",
"issue_link_type_id",
")",
":",
"url",
"=",
"'rest/api/2/issueLinkType/{issueLinkTypeId}'",
".",
"format",
"(",
"issueLinkTypeId",
"=",
"issue_link_type_id",
")",
"return",
"self",
".",
"delete",
"(",
"url",
")"
] |
Delete the specified issue link type.
|
[
"Delete",
"the",
"specified",
"issue",
"link",
"type",
"."
] |
python
|
train
|
mdsol/rwslib
|
rwslib/builders/metadata.py
|
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L125-L130
|
def build(self, builder):
"""Build XML by appending to builder"""
builder.start("BasicDefinitions", {})
for child in self.measurement_units:
child.build(builder)
builder.end("BasicDefinitions")
|
[
"def",
"build",
"(",
"self",
",",
"builder",
")",
":",
"builder",
".",
"start",
"(",
"\"BasicDefinitions\"",
",",
"{",
"}",
")",
"for",
"child",
"in",
"self",
".",
"measurement_units",
":",
"child",
".",
"build",
"(",
"builder",
")",
"builder",
".",
"end",
"(",
"\"BasicDefinitions\"",
")"
] |
Build XML by appending to builder
|
[
"Build",
"XML",
"by",
"appending",
"to",
"builder"
] |
python
|
train
|
globality-corp/microcosm-flask
|
microcosm_flask/swagger/definitions.py
|
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/swagger/definitions.py#L295-L307
|
def build_response(description, resource=None):
"""
Build a response definition.
"""
response = swagger.Response(
description=description,
)
if resource is not None:
response.schema = swagger.JsonReference({
"$ref": "#/definitions/{}".format(type_name(name_for(resource))),
})
return response
|
[
"def",
"build_response",
"(",
"description",
",",
"resource",
"=",
"None",
")",
":",
"response",
"=",
"swagger",
".",
"Response",
"(",
"description",
"=",
"description",
",",
")",
"if",
"resource",
"is",
"not",
"None",
":",
"response",
".",
"schema",
"=",
"swagger",
".",
"JsonReference",
"(",
"{",
"\"$ref\"",
":",
"\"#/definitions/{}\"",
".",
"format",
"(",
"type_name",
"(",
"name_for",
"(",
"resource",
")",
")",
")",
",",
"}",
")",
"return",
"response"
] |
Build a response definition.
|
[
"Build",
"a",
"response",
"definition",
"."
] |
python
|
train
|
HewlettPackard/python-hpOneView
|
hpOneView/resources/settings/firmware_bundles.py
|
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/settings/firmware_bundles.py#L48-L62
|
def upload(self, file_path, timeout=-1):
"""
Upload an SPP ISO image file or a hotfix file to the appliance.
The API supports upload of one hotfix at a time into the system.
For the successful upload of a hotfix, ensure its original name and extension are not altered.
Args:
file_path: Full path to firmware.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Information about the updated firmware bundle.
"""
return self._client.upload(file_path, timeout=timeout)
|
[
"def",
"upload",
"(",
"self",
",",
"file_path",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"return",
"self",
".",
"_client",
".",
"upload",
"(",
"file_path",
",",
"timeout",
"=",
"timeout",
")"
] |
Upload an SPP ISO image file or a hotfix file to the appliance.
The API supports upload of one hotfix at a time into the system.
For the successful upload of a hotfix, ensure its original name and extension are not altered.
Args:
file_path: Full path to firmware.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Information about the updated firmware bundle.
|
[
"Upload",
"an",
"SPP",
"ISO",
"image",
"file",
"or",
"a",
"hotfix",
"file",
"to",
"the",
"appliance",
".",
"The",
"API",
"supports",
"upload",
"of",
"one",
"hotfix",
"at",
"a",
"time",
"into",
"the",
"system",
".",
"For",
"the",
"successful",
"upload",
"of",
"a",
"hotfix",
"ensure",
"its",
"original",
"name",
"and",
"extension",
"are",
"not",
"altered",
"."
] |
python
|
train
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/nbformat/v3/rwbase.py
|
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v3/rwbase.py#L51-L62
|
def _join_lines(lines):
"""join lines that have been written by splitlines()
Has logic to protect against `splitlines()`, which
should have been `splitlines(True)`
"""
if lines and lines[0].endswith(('\n', '\r')):
# created by splitlines(True)
return u''.join(lines)
else:
# created by splitlines()
return u'\n'.join(lines)
|
[
"def",
"_join_lines",
"(",
"lines",
")",
":",
"if",
"lines",
"and",
"lines",
"[",
"0",
"]",
".",
"endswith",
"(",
"(",
"'\\n'",
",",
"'\\r'",
")",
")",
":",
"# created by splitlines(True)",
"return",
"u''",
".",
"join",
"(",
"lines",
")",
"else",
":",
"# created by splitlines()",
"return",
"u'\\n'",
".",
"join",
"(",
"lines",
")"
] |
join lines that have been written by splitlines()
Has logic to protect against `splitlines()`, which
should have been `splitlines(True)`
|
[
"join",
"lines",
"that",
"have",
"been",
"written",
"by",
"splitlines",
"()",
"Has",
"logic",
"to",
"protect",
"against",
"splitlines",
"()",
"which",
"should",
"have",
"been",
"splitlines",
"(",
"True",
")"
] |
python
|
test
|
edx/edx-enterprise
|
enterprise/api/v1/views.py
|
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/views.py#L124-L145
|
def contains_content_items(self, request, pk, course_run_ids, program_uuids):
"""
Return whether or not the specified content is available to the EnterpriseCustomer.
Multiple course_run_ids and/or program_uuids query parameters can be sent to this view to check
for their existence in the EnterpriseCustomerCatalogs associated with this EnterpriseCustomer.
At least one course run key or program UUID value must be included in the request.
"""
enterprise_customer = self.get_object()
# Maintain plus characters in course key.
course_run_ids = [unquote(quote_plus(course_run_id)) for course_run_id in course_run_ids]
contains_content_items = False
for catalog in enterprise_customer.enterprise_customer_catalogs.all():
contains_course_runs = not course_run_ids or catalog.contains_courses(course_run_ids)
contains_program_uuids = not program_uuids or catalog.contains_programs(program_uuids)
if contains_course_runs and contains_program_uuids:
contains_content_items = True
break
return Response({'contains_content_items': contains_content_items})
|
[
"def",
"contains_content_items",
"(",
"self",
",",
"request",
",",
"pk",
",",
"course_run_ids",
",",
"program_uuids",
")",
":",
"enterprise_customer",
"=",
"self",
".",
"get_object",
"(",
")",
"# Maintain plus characters in course key.",
"course_run_ids",
"=",
"[",
"unquote",
"(",
"quote_plus",
"(",
"course_run_id",
")",
")",
"for",
"course_run_id",
"in",
"course_run_ids",
"]",
"contains_content_items",
"=",
"False",
"for",
"catalog",
"in",
"enterprise_customer",
".",
"enterprise_customer_catalogs",
".",
"all",
"(",
")",
":",
"contains_course_runs",
"=",
"not",
"course_run_ids",
"or",
"catalog",
".",
"contains_courses",
"(",
"course_run_ids",
")",
"contains_program_uuids",
"=",
"not",
"program_uuids",
"or",
"catalog",
".",
"contains_programs",
"(",
"program_uuids",
")",
"if",
"contains_course_runs",
"and",
"contains_program_uuids",
":",
"contains_content_items",
"=",
"True",
"break",
"return",
"Response",
"(",
"{",
"'contains_content_items'",
":",
"contains_content_items",
"}",
")"
] |
Return whether or not the specified content is available to the EnterpriseCustomer.
Multiple course_run_ids and/or program_uuids query parameters can be sent to this view to check
for their existence in the EnterpriseCustomerCatalogs associated with this EnterpriseCustomer.
At least one course run key or program UUID value must be included in the request.
|
[
"Return",
"whether",
"or",
"not",
"the",
"specified",
"content",
"is",
"available",
"to",
"the",
"EnterpriseCustomer",
"."
] |
python
|
valid
|
mongolab/mongoctl
|
mongoctl/users.py
|
https://github.com/mongolab/mongoctl/blob/fab15216127ad4bf8ea9aa8a95d75504c0ef01a2/mongoctl/users.py#L195-L206
|
def prepend_global_admin_user(other_users, server):
"""
When making lists of administrative users -- e.g., seeding a new server --
it's useful to put the credentials supplied on the command line at the head
of the queue.
"""
cred0 = get_global_login_user(server, "admin")
if cred0 and cred0["username"] and cred0["password"]:
log_verbose("Seeding : CRED0 to the front of the line!")
return [cred0] + other_users if other_users else [cred0]
else:
return other_users
|
[
"def",
"prepend_global_admin_user",
"(",
"other_users",
",",
"server",
")",
":",
"cred0",
"=",
"get_global_login_user",
"(",
"server",
",",
"\"admin\"",
")",
"if",
"cred0",
"and",
"cred0",
"[",
"\"username\"",
"]",
"and",
"cred0",
"[",
"\"password\"",
"]",
":",
"log_verbose",
"(",
"\"Seeding : CRED0 to the front of the line!\"",
")",
"return",
"[",
"cred0",
"]",
"+",
"other_users",
"if",
"other_users",
"else",
"[",
"cred0",
"]",
"else",
":",
"return",
"other_users"
] |
When making lists of administrative users -- e.g., seeding a new server --
it's useful to put the credentials supplied on the command line at the head
of the queue.
|
[
"When",
"making",
"lists",
"of",
"administrative",
"users",
"--",
"e",
".",
"g",
".",
"seeding",
"a",
"new",
"server",
"--",
"it",
"s",
"useful",
"to",
"put",
"the",
"credentials",
"supplied",
"on",
"the",
"command",
"line",
"at",
"the",
"head",
"of",
"the",
"queue",
"."
] |
python
|
train
|
beregond/super_state_machine
|
super_state_machine/utils.py
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L127-L142
|
def translate(self, value):
"""Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum.
"""
if self._check_if_already_proper(value):
return value
try:
return self.search_table[value]
except KeyError:
raise ValueError("Value {value} doesn't match any state.".format(
value=value
))
|
[
"def",
"translate",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_check_if_already_proper",
"(",
"value",
")",
":",
"return",
"value",
"try",
":",
"return",
"self",
".",
"search_table",
"[",
"value",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Value {value} doesn't match any state.\"",
".",
"format",
"(",
"value",
"=",
"value",
")",
")"
] |
Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum.
|
[
"Translate",
"value",
"to",
"enum",
"instance",
"."
] |
python
|
train
|
skelsec/minikerberos
|
minikerberos/communication.py
|
https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L215-L285
|
def get_TGT(self, override_etype = None, decrypt_tgt = True):
"""
decrypt_tgt: used for asreproast attacks
Steps performed:
1. Send and empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT
"""
logger.debug('Generating initial TGT without authentication data')
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
kdc_req_body['realm'] = self.usercreds.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype is None:
kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
else:
kdc_req_body['etype'] = override_etype
pa_data_1 = {}
pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = AS_REQ(kdc_req)
logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
#user can do kerberos auth without preauthentication!
self.kerberos_TGT = rep.native
#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt
#so we just return, the asrep can be extracted from this object anyhow
if decrypt_tgt == False:
return
self.kerberos_cipher = _enctype_table[23]
self.kerberos_cipher_type = 23
self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
else:
if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
raise KerberosError(rep)
rep = rep.native
logger.debug('Got reply from server, asikg to provide auth data')
rep = self.do_preauth(rep)
logger.debug('Got valid TGT response from server')
rep = rep.native
self.kerberos_TGT = rep
cipherText = rep['enc-part']['cipher']
temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
logger.debug('Got valid TGT')
return
|
[
"def",
"get_TGT",
"(",
"self",
",",
"override_etype",
"=",
"None",
",",
"decrypt_tgt",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"'Generating initial TGT without authentication data'",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"kdc_req_body",
"=",
"{",
"}",
"kdc_req_body",
"[",
"'kdc-options'",
"]",
"=",
"KDCOptions",
"(",
"set",
"(",
"[",
"'forwardable'",
",",
"'renewable'",
",",
"'proxiable'",
"]",
")",
")",
"kdc_req_body",
"[",
"'cname'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"PRINCIPAL",
".",
"value",
",",
"'name-string'",
":",
"[",
"self",
".",
"usercreds",
".",
"username",
"]",
"}",
")",
"kdc_req_body",
"[",
"'realm'",
"]",
"=",
"self",
".",
"usercreds",
".",
"domain",
".",
"upper",
"(",
")",
"kdc_req_body",
"[",
"'sname'",
"]",
"=",
"PrincipalName",
"(",
"{",
"'name-type'",
":",
"NAME_TYPE",
".",
"PRINCIPAL",
".",
"value",
",",
"'name-string'",
":",
"[",
"'krbtgt'",
",",
"self",
".",
"usercreds",
".",
"domain",
".",
"upper",
"(",
")",
"]",
"}",
")",
"kdc_req_body",
"[",
"'till'",
"]",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"kdc_req_body",
"[",
"'rtime'",
"]",
"=",
"now",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"kdc_req_body",
"[",
"'nonce'",
"]",
"=",
"secrets",
".",
"randbits",
"(",
"31",
")",
"if",
"override_etype",
"is",
"None",
":",
"kdc_req_body",
"[",
"'etype'",
"]",
"=",
"self",
".",
"usercreds",
".",
"get_supported_enctypes",
"(",
")",
"else",
":",
"kdc_req_body",
"[",
"'etype'",
"]",
"=",
"override_etype",
"pa_data_1",
"=",
"{",
"}",
"pa_data_1",
"[",
"'padata-type'",
"]",
"=",
"int",
"(",
"PADATA_TYPE",
"(",
"'PA-PAC-REQUEST'",
")",
")",
"pa_data_1",
"[",
"'padata-value'",
"]",
"=",
"PA_PAC_REQUEST",
"(",
"{",
"'include-pac'",
":",
"True",
"}",
")",
".",
"dump",
"(",
")",
"kdc_req",
"=",
"{",
"}",
"kdc_req",
"[",
"'pvno'",
"]",
"=",
"krb5_pvno",
"kdc_req",
"[",
"'msg-type'",
"]",
"=",
"MESSAGE_TYPE",
".",
"KRB_AS_REQ",
".",
"value",
"kdc_req",
"[",
"'padata'",
"]",
"=",
"[",
"pa_data_1",
"]",
"kdc_req",
"[",
"'req-body'",
"]",
"=",
"KDC_REQ_BODY",
"(",
"kdc_req_body",
")",
"req",
"=",
"AS_REQ",
"(",
"kdc_req",
")",
"logger",
".",
"debug",
"(",
"'Sending initial TGT to %s'",
"%",
"self",
".",
"ksoc",
".",
"get_addr_str",
"(",
")",
")",
"rep",
"=",
"self",
".",
"ksoc",
".",
"sendrecv",
"(",
"req",
".",
"dump",
"(",
")",
",",
"throw",
"=",
"False",
")",
"if",
"rep",
".",
"name",
"!=",
"'KRB_ERROR'",
":",
"#user can do kerberos auth without preauthentication!",
"self",
".",
"kerberos_TGT",
"=",
"rep",
".",
"native",
"#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt",
"#so we just return, the asrep can be extracted from this object anyhow",
"if",
"decrypt_tgt",
"==",
"False",
":",
"return",
"self",
".",
"kerberos_cipher",
"=",
"_enctype_table",
"[",
"23",
"]",
"self",
".",
"kerberos_cipher_type",
"=",
"23",
"self",
".",
"kerberos_key",
"=",
"Key",
"(",
"self",
".",
"kerberos_cipher",
".",
"enctype",
",",
"self",
".",
"usercreds",
".",
"get_key_for_enctype",
"(",
"EncryptionType",
".",
"ARCFOUR_HMAC_MD5",
")",
")",
"else",
":",
"if",
"rep",
".",
"native",
"[",
"'error-code'",
"]",
"!=",
"KerberosErrorCode",
".",
"KDC_ERR_PREAUTH_REQUIRED",
".",
"value",
":",
"raise",
"KerberosError",
"(",
"rep",
")",
"rep",
"=",
"rep",
".",
"native",
"logger",
".",
"debug",
"(",
"'Got reply from server, asikg to provide auth data'",
")",
"rep",
"=",
"self",
".",
"do_preauth",
"(",
"rep",
")",
"logger",
".",
"debug",
"(",
"'Got valid TGT response from server'",
")",
"rep",
"=",
"rep",
".",
"native",
"self",
".",
"kerberos_TGT",
"=",
"rep",
"cipherText",
"=",
"rep",
"[",
"'enc-part'",
"]",
"[",
"'cipher'",
"]",
"temp",
"=",
"self",
".",
"kerberos_cipher",
".",
"decrypt",
"(",
"self",
".",
"kerberos_key",
",",
"3",
",",
"cipherText",
")",
"self",
".",
"kerberos_TGT_encpart",
"=",
"EncASRepPart",
".",
"load",
"(",
"temp",
")",
".",
"native",
"self",
".",
"kerberos_session_key",
"=",
"Key",
"(",
"self",
".",
"kerberos_cipher",
".",
"enctype",
",",
"self",
".",
"kerberos_TGT_encpart",
"[",
"'key'",
"]",
"[",
"'keyvalue'",
"]",
")",
"self",
".",
"ccache",
".",
"add_tgt",
"(",
"self",
".",
"kerberos_TGT",
",",
"self",
".",
"kerberos_TGT_encpart",
",",
"override_pp",
"=",
"True",
")",
"logger",
".",
"debug",
"(",
"'Got valid TGT'",
")",
"return"
] |
decrypt_tgt: used for asreproast attacks
Steps performed:
1. Send and empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT
|
[
"decrypt_tgt",
":",
"used",
"for",
"asreproast",
"attacks",
"Steps",
"performed",
":",
"1",
".",
"Send",
"and",
"empty",
"(",
"no",
"encrypted",
"timestamp",
")",
"AS_REQ",
"with",
"all",
"the",
"encryption",
"types",
"we",
"support",
"2",
".",
"Depending",
"on",
"the",
"response",
"(",
"either",
"error",
"or",
"AS_REP",
"with",
"TGT",
")",
"we",
"either",
"send",
"another",
"AS_REQ",
"with",
"the",
"encrypted",
"data",
"or",
"return",
"the",
"TGT",
"(",
"or",
"fail",
"miserably",
")",
"3",
".",
"PROFIT"
] |
python
|
train
|
lingthio/Flask-User
|
flask_user/user_manager__utils.py
|
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/user_manager__utils.py#L54-L72
|
def make_safe_url(self, url):
"""Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames.
"""
# Split the URL into scheme, netloc, path, query and fragment
parts = list(urlsplit(url))
# Clear scheme and netloc and rebuild URL
parts[0] = '' # Empty scheme
parts[1] = '' # Empty netloc (hostname:port)
safe_url = urlunsplit(parts)
return safe_url
|
[
"def",
"make_safe_url",
"(",
"self",
",",
"url",
")",
":",
"# Split the URL into scheme, netloc, path, query and fragment",
"parts",
"=",
"list",
"(",
"urlsplit",
"(",
"url",
")",
")",
"# Clear scheme and netloc and rebuild URL",
"parts",
"[",
"0",
"]",
"=",
"''",
"# Empty scheme",
"parts",
"[",
"1",
"]",
"=",
"''",
"# Empty netloc (hostname:port)",
"safe_url",
"=",
"urlunsplit",
"(",
"parts",
")",
"return",
"safe_url"
] |
Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames.
|
[
"Makes",
"a",
"URL",
"safe",
"by",
"removing",
"optional",
"hostname",
"and",
"port",
"."
] |
python
|
train
|
rpcope1/PythonConfluenceAPI
|
PythonConfluenceAPI/api.py
|
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/api.py#L463-L501
|
def get_content_comments(self, content_id, expand=None, parent_version=None, start=None, limit=None,
location=None, depth=None, callback=None):
"""
Returns the comments associated with a piece of content.
:param content_id (string): A string containing the id of the content to retrieve children for.
:param expand (string): OPTIONAL: a comma separated list of properties to expand on the children.
We can also specify some extensions such as extensions.inlineProperties (for getting
inline comment-specific properties) or extensions.resolution for the resolution status
of each comment in the results. Default: Empty
:param parent_version (int): OPTIONAL: An int representing the version of the content to retrieve children for.
Default: 0
:param start (int): OPTIONAL: The index of the first item within the result set that should be returned.
Default: 0.
:param limit (int): OPTIONAL: How many items should be returned after the start index. Default: Site limit.
:param location (string): OPTIONAL: The location of the comments. Possible values are: "inline", "footer",
"resolved". You can define multiple location params. The results will be the comments
matched by any location. Default: "" (all).
:param depth: The depth of the comments. Possible values are: "" (ROOT only), "all". Default: "".
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/child/comment endpoint, or the results of the
callback. Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
if parent_version:
params["parentVersion"] = parent_version
if start is not None:
params["start"] = int(start)
if limit is not None:
params["limit"] = int(limit)
if location:
params["location"] = location
if depth:
assert depth in {"", "all"}
params["depth"] = depth
return self._service_get_request("rest/api/content/{id}/child/comment".format(id=content_id),
params=params, callback=callback)
|
[
"def",
"get_content_comments",
"(",
"self",
",",
"content_id",
",",
"expand",
"=",
"None",
",",
"parent_version",
"=",
"None",
",",
"start",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"location",
"=",
"None",
",",
"depth",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"params",
"=",
"{",
"}",
"if",
"expand",
":",
"params",
"[",
"\"expand\"",
"]",
"=",
"expand",
"if",
"parent_version",
":",
"params",
"[",
"\"parentVersion\"",
"]",
"=",
"parent_version",
"if",
"start",
"is",
"not",
"None",
":",
"params",
"[",
"\"start\"",
"]",
"=",
"int",
"(",
"start",
")",
"if",
"limit",
"is",
"not",
"None",
":",
"params",
"[",
"\"limit\"",
"]",
"=",
"int",
"(",
"limit",
")",
"if",
"location",
":",
"params",
"[",
"\"location\"",
"]",
"=",
"location",
"if",
"depth",
":",
"assert",
"depth",
"in",
"{",
"\"\"",
",",
"\"all\"",
"}",
"params",
"[",
"\"depth\"",
"]",
"=",
"depth",
"return",
"self",
".",
"_service_get_request",
"(",
"\"rest/api/content/{id}/child/comment\"",
".",
"format",
"(",
"id",
"=",
"content_id",
")",
",",
"params",
"=",
"params",
",",
"callback",
"=",
"callback",
")"
] |
Returns the comments associated with a piece of content.
:param content_id (string): A string containing the id of the content to retrieve children for.
:param expand (string): OPTIONAL: a comma separated list of properties to expand on the children.
We can also specify some extensions such as extensions.inlineProperties (for getting
inline comment-specific properties) or extensions.resolution for the resolution status
of each comment in the results. Default: Empty
:param parent_version (int): OPTIONAL: An int representing the version of the content to retrieve children for.
Default: 0
:param start (int): OPTIONAL: The index of the first item within the result set that should be returned.
Default: 0.
:param limit (int): OPTIONAL: How many items should be returned after the start index. Default: Site limit.
:param location (string): OPTIONAL: The location of the comments. Possible values are: "inline", "footer",
"resolved". You can define multiple location params. The results will be the comments
matched by any location. Default: "" (all).
:param depth: The depth of the comments. Possible values are: "" (ROOT only), "all". Default: "".
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id}/child/comment endpoint, or the results of the
callback. Will raise requests.HTTPError on bad input, potentially.
|
[
"Returns",
"the",
"comments",
"associated",
"with",
"a",
"piece",
"of",
"content",
".",
":",
"param",
"content_id",
"(",
"string",
")",
":",
"A",
"string",
"containing",
"the",
"id",
"of",
"the",
"content",
"to",
"retrieve",
"children",
"for",
".",
":",
"param",
"expand",
"(",
"string",
")",
":",
"OPTIONAL",
":",
"a",
"comma",
"separated",
"list",
"of",
"properties",
"to",
"expand",
"on",
"the",
"children",
".",
"We",
"can",
"also",
"specify",
"some",
"extensions",
"such",
"as",
"extensions",
".",
"inlineProperties",
"(",
"for",
"getting",
"inline",
"comment",
"-",
"specific",
"properties",
")",
"or",
"extensions",
".",
"resolution",
"for",
"the",
"resolution",
"status",
"of",
"each",
"comment",
"in",
"the",
"results",
".",
"Default",
":",
"Empty",
":",
"param",
"parent_version",
"(",
"int",
")",
":",
"OPTIONAL",
":",
"An",
"int",
"representing",
"the",
"version",
"of",
"the",
"content",
"to",
"retrieve",
"children",
"for",
".",
"Default",
":",
"0",
":",
"param",
"start",
"(",
"int",
")",
":",
"OPTIONAL",
":",
"The",
"index",
"of",
"the",
"first",
"item",
"within",
"the",
"result",
"set",
"that",
"should",
"be",
"returned",
".",
"Default",
":",
"0",
".",
":",
"param",
"limit",
"(",
"int",
")",
":",
"OPTIONAL",
":",
"How",
"many",
"items",
"should",
"be",
"returned",
"after",
"the",
"start",
"index",
".",
"Default",
":",
"Site",
"limit",
".",
":",
"param",
"location",
"(",
"string",
")",
":",
"OPTIONAL",
":",
"The",
"location",
"of",
"the",
"comments",
".",
"Possible",
"values",
"are",
":",
"inline",
"footer",
"resolved",
".",
"You",
"can",
"define",
"multiple",
"location",
"params",
".",
"The",
"results",
"will",
"be",
"the",
"comments",
"matched",
"by",
"any",
"location",
".",
"Default",
":",
"(",
"all",
")",
".",
":",
"param",
"depth",
":",
"The",
"depth",
"of",
"the",
"comments",
".",
"Possible",
"values",
"are",
":",
"(",
"ROOT",
"only",
")",
"all",
".",
"Default",
":",
".",
":",
"param",
"callback",
":",
"OPTIONAL",
":",
"The",
"callback",
"to",
"execute",
"on",
"the",
"resulting",
"data",
"before",
"the",
"method",
"returns",
".",
"Default",
":",
"None",
"(",
"no",
"callback",
"raw",
"data",
"returned",
")",
".",
":",
"return",
":",
"The",
"JSON",
"data",
"returned",
"from",
"the",
"content",
"/",
"{",
"id",
"}",
"/",
"child",
"/",
"comment",
"endpoint",
"or",
"the",
"results",
"of",
"the",
"callback",
".",
"Will",
"raise",
"requests",
".",
"HTTPError",
"on",
"bad",
"input",
"potentially",
"."
] |
python
|
train
|
klahnakoski/pyLibrary
|
mo_collections/matrix.py
|
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_collections/matrix.py#L226-L232
|
def items(self):
"""
ITERATE THROUGH ALL coord, value PAIRS
"""
for c in self._all_combos():
_, value = _getitem(self.cube, c)
yield c, value
|
[
"def",
"items",
"(",
"self",
")",
":",
"for",
"c",
"in",
"self",
".",
"_all_combos",
"(",
")",
":",
"_",
",",
"value",
"=",
"_getitem",
"(",
"self",
".",
"cube",
",",
"c",
")",
"yield",
"c",
",",
"value"
] |
ITERATE THROUGH ALL coord, value PAIRS
|
[
"ITERATE",
"THROUGH",
"ALL",
"coord",
"value",
"PAIRS"
] |
python
|
train
|
iktakahiro/slackpy
|
slackpy/slackpy.py
|
https://github.com/iktakahiro/slackpy/blob/23c0a2a33e408d0e8d24bc704d7021e85d6ae39a/slackpy/slackpy.py#L90-L117
|
def __send_notification(self, message, title, title_link='', color='good',
fields='', log_level=LogLv.INFO):
"""Send a message to a channel.
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception:
"""
if log_level < self.log_level:
return None
payload = self.__build_payload(message, title, title_link, color, fields)
try:
response = self.__post(payload)
except Exception:
raise Exception(traceback.format_exc())
return response
|
[
"def",
"__send_notification",
"(",
"self",
",",
"message",
",",
"title",
",",
"title_link",
"=",
"''",
",",
"color",
"=",
"'good'",
",",
"fields",
"=",
"''",
",",
"log_level",
"=",
"LogLv",
".",
"INFO",
")",
":",
"if",
"log_level",
"<",
"self",
".",
"log_level",
":",
"return",
"None",
"payload",
"=",
"self",
".",
"__build_payload",
"(",
"message",
",",
"title",
",",
"title_link",
",",
"color",
",",
"fields",
")",
"try",
":",
"response",
"=",
"self",
".",
"__post",
"(",
"payload",
")",
"except",
"Exception",
":",
"raise",
"Exception",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"return",
"response"
] |
Send a message to a channel.
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception:
|
[
"Send",
"a",
"message",
"to",
"a",
"channel",
".",
"Args",
":",
"title",
":",
"Message",
"title",
".",
"title_link",
":",
"Link",
"of",
"the",
"message",
"title",
".",
"message",
":",
"Message",
"body",
".",
"color",
":",
"Message",
"line",
"color",
"on",
"Slack",
".",
"This",
"parameter",
"should",
"be",
"one",
"of",
"the",
"following",
"values",
":",
"good",
"warning",
"danger",
"or",
"any",
"hex",
"color",
"code",
"."
] |
python
|
train
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.