nwo stringlengths 5 106 | sha stringlengths 40 40 | path stringlengths 4 174 | language stringclasses 1
value | identifier stringlengths 1 140 | parameters stringlengths 0 87.7k | argument_list stringclasses 1
value | return_statement stringlengths 0 426k | docstring stringlengths 0 64.3k | docstring_summary stringlengths 0 26.3k | docstring_tokens list | function stringlengths 18 4.83M | function_tokens list | url stringlengths 83 304 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/cam/v20190116/models.py | python | DeleteSAMLProviderResponse.__init__ | (self) | r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str | r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str | [
"r",
":",
"param",
"RequestId",
":",
"唯一请求",
"ID,每次请求都会返回。定位问题时需要提供该次请求的",
"RequestId。",
":",
"type",
"RequestId",
":",
"str"
] | def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"RequestId",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/cam/v20190116/models.py#L1256-L1261 | ||
rapidsai/cusignal | 6a025660aa36a1699f2f5fa999205e207ec6ce61 | python/cusignal/bsplines/bsplines.py | python | quadratic | (x) | return _quadratic_kernel(x) | A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``. | A quadratic B-spline. | [
"A",
"quadratic",
"B",
"-",
"spline",
"."
] | def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
"""
x = cp.asarray(x)
return _quadratic_kernel(x) | [
"def",
"quadratic",
"(",
"x",
")",
":",
"x",
"=",
"cp",
".",
"asarray",
"(",
"x",
")",
"return",
"_quadratic_kernel",
"(",
"x",
")"
] | https://github.com/rapidsai/cusignal/blob/6a025660aa36a1699f2f5fa999205e207ec6ce61/python/cusignal/bsplines/bsplines.py#L98-L105 | |
mozman/ezdxf | 59d0fc2ea63f5cf82293428f5931da7e9f9718e9 | src/ezdxf/addons/r12writer.py | python | R12FastStreamWriter.add_point | (
self,
location: Vertex,
layer: str = "0",
color: int = None,
linetype: str = None,
) | Add a POINT entity.
Args:
location: point location as ``(x, y [,z])`` tuple
layer: layer name as string see :meth:`add_line`
color: color as :ref:`ACI` see :meth:`add_line`
linetype: line type as string see :meth:`add_line` | Add a POINT entity. | [
"Add",
"a",
"POINT",
"entity",
"."
] | def add_point(
self,
location: Vertex,
layer: str = "0",
color: int = None,
linetype: str = None,
) -> None:
"""
Add a POINT entity.
Args:
location: point location as ``(x, y [,z])`` tuple
layer: layer name as string see :meth:`add_line`
color: color as :ref:`ACI` see :meth:`add_line`
linetype: line type as string see :meth:`add_line`
"""
dxf = ["0\nPOINT\n"]
dxf.append(dxf_attribs(layer, color, linetype))
dxf.append(dxf_vertex(location))
self.stream.write("".join(dxf)) | [
"def",
"add_point",
"(",
"self",
",",
"location",
":",
"Vertex",
",",
"layer",
":",
"str",
"=",
"\"0\"",
",",
"color",
":",
"int",
"=",
"None",
",",
"linetype",
":",
"str",
"=",
"None",
",",
")",
"->",
"None",
":",
"dxf",
"=",
"[",
"\"0\\nPOINT\\n\... | https://github.com/mozman/ezdxf/blob/59d0fc2ea63f5cf82293428f5931da7e9f9718e9/src/ezdxf/addons/r12writer.py#L210-L230 | ||
ssato/python-anyconfig | 09af1950f3226759932f5168d52f5e06ab88815c | src/anyconfig/schema/jsonschema.py | python | gen_schema | (data: InDataExT, **options) | return scm | Generate a node represents JSON schema object with type annotation added
for given object node.
:param data: Configuration data object (dict[-like] or namedtuple)
:param options: Other keyword options such as:
- ac_schema_strict: True if more strict (precise) schema is needed
- ac_schema_typemap: Type to JSON schema type mappings
:return: A dict represents JSON schema of this node | Generate a node represents JSON schema object with type annotation added
for given object node. | [
"Generate",
"a",
"node",
"represents",
"JSON",
"schema",
"object",
"with",
"type",
"annotation",
"added",
"for",
"given",
"object",
"node",
"."
] | def gen_schema(data: InDataExT, **options) -> InDataT:
"""
Generate a node represents JSON schema object with type annotation added
for given object node.
:param data: Configuration data object (dict[-like] or namedtuple)
:param options: Other keyword options such as:
- ac_schema_strict: True if more strict (precise) schema is needed
- ac_schema_typemap: Type to JSON schema type mappings
:return: A dict represents JSON schema of this node
"""
if data is None:
return {'type': 'null'}
_type = type(data)
if _type in _SIMPLE_TYPES:
typemap = options.get('ac_schema_typemap', _SIMPLETYPE_MAP)
scm = {'type': typemap[_type]}
elif is_dict_like(data):
scm = object_to_schema(data, **options) # type: ignore
elif is_list_like(data):
scm = array_to_schema(
typing.cast(typing.Iterable[InDataT], data), **options
)
return scm | [
"def",
"gen_schema",
"(",
"data",
":",
"InDataExT",
",",
"*",
"*",
"options",
")",
"->",
"InDataT",
":",
"if",
"data",
"is",
"None",
":",
"return",
"{",
"'type'",
":",
"'null'",
"}",
"_type",
"=",
"type",
"(",
"data",
")",
"if",
"_type",
"in",
"_SI... | https://github.com/ssato/python-anyconfig/blob/09af1950f3226759932f5168d52f5e06ab88815c/src/anyconfig/schema/jsonschema.py#L190-L220 | |
kuri65536/python-for-android | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | python3-alpha/python-libs/atom/__init__.py | python | Contributor.__init__ | (self, name=None, email=None, uri=None,
extension_elements=None, extension_attributes=None, text=None) | Constructor for Contributor
Args:
name: Name
email: Email
uri: Uri
extension_elements: list A list of ExtensionElement instances
extension_attributes: dict A dictionary of attribute value string pairs
text: str The text data in the this element | Constructor for Contributor | [
"Constructor",
"for",
"Contributor"
] | def __init__(self, name=None, email=None, uri=None,
extension_elements=None, extension_attributes=None, text=None):
"""Constructor for Contributor
Args:
name: Name
email: Email
uri: Uri
extension_elements: list A list of ExtensionElement instances
extension_attributes: dict A dictionary of attribute value string pairs
text: str The text data in the this element
"""
self.name = name
self.email = email
self.uri = uri
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
self.text = text | [
"def",
"__init__",
"(",
"self",
",",
"name",
"=",
"None",
",",
"email",
"=",
"None",
",",
"uri",
"=",
"None",
",",
"extension_elements",
"=",
"None",
",",
"extension_attributes",
"=",
"None",
",",
"text",
"=",
"None",
")",
":",
"self",
".",
"name",
"... | https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python3-alpha/python-libs/atom/__init__.py#L547-L565 | ||
oilshell/oil | 94388e7d44a9ad879b12615f6203b38596b5a2d3 | Python-2.7.13/Tools/scripts/texi2html.py | python | TexinfoParser.close_pxref | (self) | [] | def close_pxref(self):
self.makeref() | [
"def",
"close_pxref",
"(",
"self",
")",
":",
"self",
".",
"makeref",
"(",
")"
] | https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Tools/scripts/texi2html.py#L682-L683 | ||||
JaniceWuo/MovieRecommend | 4c86db64ca45598917d304f535413df3bc9fea65 | movierecommend/venv1/Lib/site-packages/django/db/models/aggregates.py | python | StdDev.__init__ | (self, expression, sample=False, **extra) | [] | def __init__(self, expression, sample=False, **extra):
self.function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
super(StdDev, self).__init__(expression, output_field=FloatField(), **extra) | [
"def",
"__init__",
"(",
"self",
",",
"expression",
",",
"sample",
"=",
"False",
",",
"*",
"*",
"extra",
")",
":",
"self",
".",
"function",
"=",
"'STDDEV_SAMP'",
"if",
"sample",
"else",
"'STDDEV_POP'",
"super",
"(",
"StdDev",
",",
"self",
")",
".",
"__i... | https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/db/models/aggregates.py#L97-L99 | ||||
spesmilo/electrum | bdbd59300fbd35b01605e66145458e5f396108e8 | electrum/gui/qt/transaction_dialog.py | python | TxOutputColoring.__init__ | (
self,
*,
legend: str,
color: ColorSchemeItem,
tooltip: str,
) | [] | def __init__(
self,
*,
legend: str,
color: ColorSchemeItem,
tooltip: str,
):
self.color = color.as_color(background=True)
self.legend_label = QLabel("<font color={color}>{box_char}</font> = {label}".format(
color=self.color.name(),
box_char="█",
label=legend,
))
font = self.legend_label.font()
font.setPointSize(font.pointSize() - 1)
self.legend_label.setFont(font)
self.legend_label.setVisible(False)
self.text_char_format = QTextCharFormat()
self.text_char_format.setBackground(QBrush(self.color))
self.text_char_format.setToolTip(tooltip) | [
"def",
"__init__",
"(",
"self",
",",
"*",
",",
"legend",
":",
"str",
",",
"color",
":",
"ColorSchemeItem",
",",
"tooltip",
":",
"str",
",",
")",
":",
"self",
".",
"color",
"=",
"color",
".",
"as_color",
"(",
"background",
"=",
"True",
")",
"self",
... | https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/gui/qt/transaction_dialog.py#L754-L773 | ||||
ebroecker/canmatrix | 219a19adf4639b0b4fd5328f039563c6d4060887 | src/canmatrix/formats/arxml.py | python | Earxml.selector | (self, start_element, selector) | return sorted(result_list, key=lambda element: element.sourceline) | [] | def selector(self, start_element, selector):
start_pos = 0
token = ""
result_list = [start_element]
last_found_token = 0
while start_pos < len(selector):
token_match = re.search(r'//|/|>>|>|<<|<|#|$', selector[start_pos:])
found_token = token_match.span()
if start_pos > 0: # at least one Token found...
value = selector[last_found_token:start_pos + found_token[0]]
if token == "//":
result_list = [c for a in result_list for c in self.findall(value, a)]
elif token == "/":
if value == "..":
result_list = [self.get_referencable_parent(a.getparent()) for a in result_list]
else:
result_list = [self.get_sub_by_name(a, value) for a in result_list]
elif token == ">>":
result_list = [self.xml_element_cache[a.text][0].getparent() for start in result_list for a in
self.get_all_sub_by_name(start, value) if a.text in self.xml_element_cache]
elif token == ">":
result_list = [self.xml_element_cache[self.get_sub_by_name(a, value).text][0].getparent() for a in result_list
if self.get_sub_by_name(a, value) is not None and self.get_sub_by_name(a, value).text in self.xml_element_cache]
elif token == "<<":
result_list = [c for a in result_list for c in
self.find_references_of_type(a, value, referencable_parent=True)]
elif token == "<":
result_list = [self.find_references_of_type(a, value, referencable_parent=True)[0]
for a in result_list
if len(self.find_references_of_type(a, value, referencable_parent=True)) > 0]
elif token == "#":
sn_snippets = value.split("|")
filtered_results = []
for tag in result_list:
sn = self.get_short_name(tag)
for test_name in sn_snippets:
if test_name in sn:
filtered_results.append(tag)
result_list = filtered_results
result_list = list(set(result_list))
last_found_token = found_token[1] + start_pos
token = selector[start_pos + found_token[0]:start_pos + found_token[1]]
start_pos += found_token[1]
return sorted(result_list, key=lambda element: element.sourceline) | [
"def",
"selector",
"(",
"self",
",",
"start_element",
",",
"selector",
")",
":",
"start_pos",
"=",
"0",
"token",
"=",
"\"\"",
"result_list",
"=",
"[",
"start_element",
"]",
"last_found_token",
"=",
"0",
"while",
"start_pos",
"<",
"len",
"(",
"selector",
")... | https://github.com/ebroecker/canmatrix/blob/219a19adf4639b0b4fd5328f039563c6d4060887/src/canmatrix/formats/arxml.py#L245-L291 | |||
mozillazg/pypy | 2ff5cd960c075c991389f842c6d59e71cf0cb7d0 | lib-python/2.7/compiler/pyassem.py | python | PyFlowGraph.getCode | (self) | return self.newCodeObject() | Get a Python code object | Get a Python code object | [
"Get",
"a",
"Python",
"code",
"object"
] | def getCode(self):
"""Get a Python code object"""
assert self.stage == RAW
self.computeStackDepth()
self.flattenGraph()
assert self.stage == FLAT
self.convertArgs()
assert self.stage == CONV
self.makeByteCode()
assert self.stage == DONE
return self.newCodeObject() | [
"def",
"getCode",
"(",
"self",
")",
":",
"assert",
"self",
".",
"stage",
"==",
"RAW",
"self",
".",
"computeStackDepth",
"(",
")",
"self",
".",
"flattenGraph",
"(",
")",
"assert",
"self",
".",
"stage",
"==",
"FLAT",
"self",
".",
"convertArgs",
"(",
")",... | https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/compiler/pyassem.py#L305-L315 | |
skorch-dev/skorch | cf6615be4e62a16af6f8d83a47e8b59b5c48a58c | skorch/callbacks/lr_scheduler.py | python | LRScheduler._get_scheduler | (self, net, policy, **scheduler_kwargs) | return policy(net.optimizer_, **scheduler_kwargs) | Return scheduler, based on indicated policy, with appropriate
parameters. | Return scheduler, based on indicated policy, with appropriate
parameters. | [
"Return",
"scheduler",
"based",
"on",
"indicated",
"policy",
"with",
"appropriate",
"parameters",
"."
] | def _get_scheduler(self, net, policy, **scheduler_kwargs):
"""Return scheduler, based on indicated policy, with appropriate
parameters.
"""
if policy not in [ReduceLROnPlateau] and \
'last_epoch' not in scheduler_kwargs:
last_epoch = len(net.history) - 1
scheduler_kwargs['last_epoch'] = last_epoch
return policy(net.optimizer_, **scheduler_kwargs) | [
"def",
"_get_scheduler",
"(",
"self",
",",
"net",
",",
"policy",
",",
"*",
"*",
"scheduler_kwargs",
")",
":",
"if",
"policy",
"not",
"in",
"[",
"ReduceLROnPlateau",
"]",
"and",
"'last_epoch'",
"not",
"in",
"scheduler_kwargs",
":",
"last_epoch",
"=",
"len",
... | https://github.com/skorch-dev/skorch/blob/cf6615be4e62a16af6f8d83a47e8b59b5c48a58c/skorch/callbacks/lr_scheduler.py#L180-L189 | |
jython/jython3 | def4f8ec47cb7a9c799ea4c745f12badf92c5769 | lib-python/3.5.1/asynchat.py | python | async_chat.readable | (self) | return 1 | predicate for inclusion in the readable for select() | predicate for inclusion in the readable for select() | [
"predicate",
"for",
"inclusion",
"in",
"the",
"readable",
"for",
"select",
"()"
] | def readable(self):
"predicate for inclusion in the readable for select()"
# cannot use the old predicate, it violates the claim of the
# set_terminator method.
# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
return 1 | [
"def",
"readable",
"(",
"self",
")",
":",
"# cannot use the old predicate, it violates the claim of the",
"# set_terminator method.",
"# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)",
"return",
"1"
] | https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/asynchat.py#L208-L214 | |
inventree/InvenTree | 4a5e4a88ac3e91d64a21e8cab3708ecbc6e2bd8b | InvenTree/build/serializers.py | python | BuildUnallocationSerializer.validate_output | (self, stock_item) | return stock_item | [] | def validate_output(self, stock_item):
# Stock item must point to the same build order!
build = self.context['build']
if stock_item and stock_item.build != build:
raise ValidationError(_("Build output must point to the same build"))
return stock_item | [
"def",
"validate_output",
"(",
"self",
",",
"stock_item",
")",
":",
"# Stock item must point to the same build order!",
"build",
"=",
"self",
".",
"context",
"[",
"'build'",
"]",
"if",
"stock_item",
"and",
"stock_item",
".",
"build",
"!=",
"build",
":",
"raise",
... | https://github.com/inventree/InvenTree/blob/4a5e4a88ac3e91d64a21e8cab3708ecbc6e2bd8b/InvenTree/build/serializers.py#L313-L321 | |||
securesystemslab/zippy | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | zippy/lib-python/3/idlelib/RemoteDebugger.py | python | IdbProxy.set_quit | (self) | [] | def set_quit(self):
self.call("set_quit") | [
"def",
"set_quit",
"(",
"self",
")",
":",
"self",
".",
"call",
"(",
"\"set_quit\"",
")"
] | https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/idlelib/RemoteDebugger.py#L332-L333 | ||||
python-diamond/Diamond | 7000e16cfdf4508ed9291fc4b3800592557b2431 | src/collectors/netstat/netstat.py | python | NetstatCollector._hex2dec | (s) | return str(int(s, 16)) | [] | def _hex2dec(s):
return str(int(s, 16)) | [
"def",
"_hex2dec",
"(",
"s",
")",
":",
"return",
"str",
"(",
"int",
"(",
"s",
",",
"16",
")",
")"
] | https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/collectors/netstat/netstat.py#L70-L71 | |||
gramps-project/gramps | 04d4651a43eb210192f40a9f8c2bad8ee8fa3753 | gramps/plugins/view/familyview.py | python | FamilyView.remove | (self, *obj) | Method called when deleting a family from a family view. | Method called when deleting a family from a family view. | [
"Method",
"called",
"when",
"deleting",
"a",
"family",
"from",
"a",
"family",
"view",
"."
] | def remove(self, *obj):
"""
Method called when deleting a family from a family view.
"""
from gramps.gui.dialog import QuestionDialog, MultiSelectDialog
from gramps.gen.utils.string import data_recover_msg
handles = self.selected_handles()
if len(handles) == 1:
family = self.dbstate.db.get_family_from_handle(handles[0])
msg1 = self._message1_format(family)
msg2 = self._message2_format(family)
msg2 = "%s %s" % (msg2, data_recover_msg)
QuestionDialog(msg1,
msg2,
_('_Delete Family'),
lambda: self.delete_family_response(family),
parent=self.uistate.window)
else:
MultiSelectDialog(self._message1_format,
self._message2_format,
handles,
self.dbstate.db.get_family_from_handle,
yes_func=self.delete_family_response,
parent=self.uistate.window) | [
"def",
"remove",
"(",
"self",
",",
"*",
"obj",
")",
":",
"from",
"gramps",
".",
"gui",
".",
"dialog",
"import",
"QuestionDialog",
",",
"MultiSelectDialog",
"from",
"gramps",
".",
"gen",
".",
"utils",
".",
"string",
"import",
"data_recover_msg",
"handles",
... | https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/plugins/view/familyview.py#L358-L381 | ||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/sympy/functions/special/delta_functions.py | python | DiracDelta._eval_rewrite_as_SingularityFunction | (self, *args, **kwargs) | Returns the DiracDelta expression written in the form of Singularity Functions. | Returns the DiracDelta expression written in the form of Singularity Functions. | [
"Returns",
"the",
"DiracDelta",
"expression",
"written",
"in",
"the",
"form",
"of",
"Singularity",
"Functions",
"."
] | def _eval_rewrite_as_SingularityFunction(self, *args, **kwargs):
"""
Returns the DiracDelta expression written in the form of Singularity Functions.
"""
from sympy.solvers import solve
from sympy.functions import SingularityFunction
if self == DiracDelta(0):
return SingularityFunction(0, 0, -1)
if self == DiracDelta(0, 1):
return SingularityFunction(0, 0, -2)
free = self.free_symbols
if len(free) == 1:
x = (free.pop())
if len(args) == 1:
return SingularityFunction(x, solve(args[0], x)[0], -1)
return SingularityFunction(x, solve(args[0], x)[0], -args[1] - 1)
else:
# I don't know how to handle the case for DiracDelta expressions
# having arguments with more than one variable.
raise TypeError(filldedent('''
rewrite(SingularityFunction) doesn't support
arguments with more that 1 variable.''')) | [
"def",
"_eval_rewrite_as_SingularityFunction",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"sympy",
".",
"solvers",
"import",
"solve",
"from",
"sympy",
".",
"functions",
"import",
"SingularityFunction",
"if",
"self",
"==",
"DiracD... | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/functions/special/delta_functions.py#L345-L367 | ||
Chaffelson/nipyapi | d3b186fd701ce308c2812746d98af9120955e810 | nipyapi/nifi/models/process_group_dto.py | python | ProcessGroupDTO.output_port_count | (self) | return self._output_port_count | Gets the output_port_count of this ProcessGroupDTO.
The number of output ports in the process group.
:return: The output_port_count of this ProcessGroupDTO.
:rtype: int | Gets the output_port_count of this ProcessGroupDTO.
The number of output ports in the process group. | [
"Gets",
"the",
"output_port_count",
"of",
"this",
"ProcessGroupDTO",
".",
"The",
"number",
"of",
"output",
"ports",
"in",
"the",
"process",
"group",
"."
] | def output_port_count(self):
"""
Gets the output_port_count of this ProcessGroupDTO.
The number of output ports in the process group.
:return: The output_port_count of this ProcessGroupDTO.
:rtype: int
"""
return self._output_port_count | [
"def",
"output_port_count",
"(",
"self",
")",
":",
"return",
"self",
".",
"_output_port_count"
] | https://github.com/Chaffelson/nipyapi/blob/d3b186fd701ce308c2812746d98af9120955e810/nipyapi/nifi/models/process_group_dto.py#L932-L940 | |
rhinstaller/anaconda | 63edc8680f1b05cbfe11bef28703acba808c5174 | pyanaconda/modules/boss/module_manager/module_manager.py | python | ModuleManager.start_modules_with_task | (self) | return task | Start modules with the task. | Start modules with the task. | [
"Start",
"modules",
"with",
"the",
"task",
"."
] | def start_modules_with_task(self):
"""Start modules with the task."""
task = StartModulesTask(
message_bus=DBus,
activatable=conf.anaconda.activatable_modules,
forbidden=conf.anaconda.forbidden_modules,
optional=conf.anaconda.optional_modules,
)
task.succeeded_signal.connect(
lambda: self.set_module_observers(task.get_result())
)
return task | [
"def",
"start_modules_with_task",
"(",
"self",
")",
":",
"task",
"=",
"StartModulesTask",
"(",
"message_bus",
"=",
"DBus",
",",
"activatable",
"=",
"conf",
".",
"anaconda",
".",
"activatable_modules",
",",
"forbidden",
"=",
"conf",
".",
"anaconda",
".",
"forbi... | https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/modules/boss/module_manager/module_manager.py#L45-L56 | |
geekan/scrapy-examples | edb1cb116bd6def65a6ef01f953b58eb43e54305 | youtube_trending/youtube_trending/pipelines.py | python | RedisPipeline.process_item | (self, item, spider) | [] | def process_item(self, item, spider):
if not item['id']:
print 'no id item!!'
str_recorded_item = self.r.get(item['id'])
final_item = None
if str_recorded_item is None:
final_item = item
else:
ritem = eval(self.r.get(item['id']))
final_item = dict(item.items() + ritem.items())
self.r.set(item['id'], final_item) | [
"def",
"process_item",
"(",
"self",
",",
"item",
",",
"spider",
")",
":",
"if",
"not",
"item",
"[",
"'id'",
"]",
":",
"print",
"'no id item!!'",
"str_recorded_item",
"=",
"self",
".",
"r",
".",
"get",
"(",
"item",
"[",
"'id'",
"]",
")",
"final_item",
... | https://github.com/geekan/scrapy-examples/blob/edb1cb116bd6def65a6ef01f953b58eb43e54305/youtube_trending/youtube_trending/pipelines.py#L36-L47 | ||||
oleg-yaroshevskiy/quest_qa_labeling | 730a9632314e54584f69f909d5e2ef74d843e02c | packages/fairseq-hacked/fairseq/file_utils.py | python | filename_to_url | (filename, cache_dir=None) | return url, etag | Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. | Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. | [
"Return",
"the",
"url",
"and",
"etag",
"(",
"which",
"may",
"be",
"None",
")",
"stored",
"for",
"filename",
".",
"Raise",
"EnvironmentError",
"if",
"filename",
"or",
"its",
"stored",
"metadata",
"do",
"not",
"exist",
"."
] | def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag | [
"def",
"filename_to_url",
"(",
"filename",
",",
"cache_dir",
"=",
"None",
")",
":",
"if",
"cache_dir",
"is",
"None",
":",
"cache_dir",
"=",
"PYTORCH_FAIRSEQ_CACHE",
"if",
"isinstance",
"(",
"cache_dir",
",",
"Path",
")",
":",
"cache_dir",
"=",
"str",
"(",
... | https://github.com/oleg-yaroshevskiy/quest_qa_labeling/blob/730a9632314e54584f69f909d5e2ef74d843e02c/packages/fairseq-hacked/fairseq/file_utils.py#L113-L136 | |
angr/angr | 4b04d56ace135018083d36d9083805be8146688b | angr/engines/vex/heavy/heavy.py | python | HeavyVEXMixin._perform_vex_stmt_Dirty_call | (self, func_name, ty, args, func=None) | return retval | [] | def _perform_vex_stmt_Dirty_call(self, func_name, ty, args, func=None):
if func is None:
try:
func = getattr(dirty, func_name)
except AttributeError as e:
raise errors.UnsupportedDirtyError("Unsupported dirty helper %s" % func_name) from e
retval, retval_constraints = func(self.state, *args)
self.state.add_constraints(*retval_constraints)
return retval | [
"def",
"_perform_vex_stmt_Dirty_call",
"(",
"self",
",",
"func_name",
",",
"ty",
",",
"args",
",",
"func",
"=",
"None",
")",
":",
"if",
"func",
"is",
"None",
":",
"try",
":",
"func",
"=",
"getattr",
"(",
"dirty",
",",
"func_name",
")",
"except",
"Attri... | https://github.com/angr/angr/blob/4b04d56ace135018083d36d9083805be8146688b/angr/engines/vex/heavy/heavy.py#L265-L273 | |||
zestedesavoir/zds-site | 2ba922223c859984a413cc6c108a8aa4023b113e | zds/tutorialv2/models/versioned.py | python | Container.get_tree_level | (self) | Return the level in the tree of this container, i.e the depth of its deepest child.
:return: tree level
:rtype: int | Return the level in the tree of this container, i.e the depth of its deepest child. | [
"Return",
"the",
"level",
"in",
"the",
"tree",
"of",
"this",
"container",
"i",
".",
"e",
"the",
"depth",
"of",
"its",
"deepest",
"child",
"."
] | def get_tree_level(self):
"""Return the level in the tree of this container, i.e the depth of its deepest child.
:return: tree level
:rtype: int
"""
if len(self.children) == 0:
return 1
elif isinstance(self.children[0], Extract):
return 2
else:
return 1 + max([i.get_tree_level() for i in self.children]) | [
"def",
"get_tree_level",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"children",
")",
"==",
"0",
":",
"return",
"1",
"elif",
"isinstance",
"(",
"self",
".",
"children",
"[",
"0",
"]",
",",
"Extract",
")",
":",
"return",
"2",
"else",
":",
... | https://github.com/zestedesavoir/zds-site/blob/2ba922223c859984a413cc6c108a8aa4023b113e/zds/tutorialv2/models/versioned.py#L131-L143 | ||
wxWidgets/Phoenix | b2199e299a6ca6d866aa6f3d0888499136ead9d6 | wx/lib/pydocview.py | python | FilePropertiesService.ProcessUpdateUIEvent | (self, event) | Updates the File/Properties menu item. | Updates the File/Properties menu item. | [
"Updates",
"the",
"File",
"/",
"Properties",
"menu",
"item",
"."
] | def ProcessUpdateUIEvent(self, event):
"""
Updates the File/Properties menu item.
"""
id = event.GetId()
if id == FilePropertiesService.PROPERTIES_ID:
for eventHandler in self._customEventHandlers:
if eventHandler.ProcessUpdateUIEvent(event):
return True
event.Enable(wx.GetApp().GetDocumentManager().GetCurrentDocument() is not None)
return True
else:
return False | [
"def",
"ProcessUpdateUIEvent",
"(",
"self",
",",
"event",
")",
":",
"id",
"=",
"event",
".",
"GetId",
"(",
")",
"if",
"id",
"==",
"FilePropertiesService",
".",
"PROPERTIES_ID",
":",
"for",
"eventHandler",
"in",
"self",
".",
"_customEventHandlers",
":",
"if",... | https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/pydocview.py#L2583-L2596 | ||
doyensec/inql | 8ee5f2d5d967fcf8f64676d355359d299c47037b | inql/burp_ext/extender.py | python | BurpExtender.registerExtenderCallbacks | (self, callbacks) | Overrides IBurpExtender method, it registers all the elements that compose this extension
:param callbacks: burp callbacks
:return: None | Overrides IBurpExtender method, it registers all the elements that compose this extension | [
"Overrides",
"IBurpExtender",
"method",
"it",
"registers",
"all",
"the",
"elements",
"that",
"compose",
"this",
"extension"
] | def registerExtenderCallbacks(self, callbacks):
"""
Overrides IBurpExtender method, it registers all the elements that compose this extension
:param callbacks: burp callbacks
:return: None
"""
self._tmpdir = tempfile.mkdtemp()
os.chdir(self._tmpdir)
helpers = callbacks.getHelpers()
callbacks.setExtensionName("InQL: Introspection GraphQL Scanner %s" % __version__)
callbacks.issueAlert("InQL Scanner Started")
print("InQL Scanner Started! (tmpdir: %s )" % os.getcwd())
# Registering GraphQL Tab
callbacks.registerMessageEditorTabFactory(lambda _, editable: GraphQLEditorTab(callbacks, editable))
# Register ourselves as a custom scanner check
callbacks.registerScannerCheck(BurpScannerCheck(callbacks))
# Register Suite Tab(s)
self._tab = GeneratorTab(callbacks, helpers)
callbacks.addSuiteTab(self._tab)
callbacks.addSuiteTab(TimerTab(callbacks, helpers))
# Register extension state listener
callbacks.registerExtensionStateListener(self) | [
"def",
"registerExtenderCallbacks",
"(",
"self",
",",
"callbacks",
")",
":",
"self",
".",
"_tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"os",
".",
"chdir",
"(",
"self",
".",
"_tmpdir",
")",
"helpers",
"=",
"callbacks",
".",
"getHelpers",
"(",
")"... | https://github.com/doyensec/inql/blob/8ee5f2d5d967fcf8f64676d355359d299c47037b/inql/burp_ext/extender.py#L25-L47 | ||
bikalims/bika.lims | 35e4bbdb5a3912cae0b5eb13e51097c8b0486349 | bika/lims/content/supplyorder.py | python | SupplyOrder.getVATAmount | (self) | return Decimal(self.getTotal()) - Decimal(self.getSubtotal()) | Compute VAT | Compute VAT | [
"Compute",
"VAT"
] | def getVATAmount(self):
""" Compute VAT """
return Decimal(self.getTotal()) - Decimal(self.getSubtotal()) | [
"def",
"getVATAmount",
"(",
"self",
")",
":",
"return",
"Decimal",
"(",
"self",
".",
"getTotal",
"(",
")",
")",
"-",
"Decimal",
"(",
"self",
".",
"getSubtotal",
"(",
")",
")"
] | https://github.com/bikalims/bika.lims/blob/35e4bbdb5a3912cae0b5eb13e51097c8b0486349/bika/lims/content/supplyorder.py#L182-L184 | |
kubernetes-client/python | 47b9da9de2d02b2b7a34fbe05afb44afd130d73a | kubernetes/client/models/v1_volume_attachment_status.py | python | V1VolumeAttachmentStatus.attachment_metadata | (self, attachment_metadata) | Sets the attachment_metadata of this V1VolumeAttachmentStatus.
Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
:param attachment_metadata: The attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
:type: dict(str, str) | Sets the attachment_metadata of this V1VolumeAttachmentStatus. | [
"Sets",
"the",
"attachment_metadata",
"of",
"this",
"V1VolumeAttachmentStatus",
"."
] | def attachment_metadata(self, attachment_metadata):
"""Sets the attachment_metadata of this V1VolumeAttachmentStatus.
Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher. # noqa: E501
:param attachment_metadata: The attachment_metadata of this V1VolumeAttachmentStatus. # noqa: E501
:type: dict(str, str)
"""
self._attachment_metadata = attachment_metadata | [
"def",
"attachment_metadata",
"(",
"self",
",",
"attachment_metadata",
")",
":",
"self",
".",
"_attachment_metadata",
"=",
"attachment_metadata"
] | https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1_volume_attachment_status.py#L127-L136 | ||
soft-matter/trackpy | a316c658ffd03d4b6fe705b9bedd63c1ab8276c0 | trackpy/masks.py | python | get_slice | (coords, shape, radius) | return tuple(slices), origin | Returns the slice and origin that belong to ``slice_image`` | Returns the slice and origin that belong to ``slice_image`` | [
"Returns",
"the",
"slice",
"and",
"origin",
"that",
"belong",
"to",
"slice_image"
] | def get_slice(coords, shape, radius):
"""Returns the slice and origin that belong to ``slice_image``"""
# interpret parameters
ndim = len(shape)
radius = validate_tuple(radius, ndim)
coords = np.atleast_2d(np.round(coords).astype(int))
# drop features that have no pixels inside the image
in_bounds = np.array([(coords[:, i] >= -r) & (coords[:, i] < sh + r)
for i, sh, r in zip(range(ndim), shape, radius)])
coords = coords[np.all(in_bounds, axis=0)]
# return if no coordinates are left
if len(coords) == 0:
return tuple([slice(None, 0)] * ndim), None
# calculate the box
lower = coords.min(axis=0) - radius
upper = coords.max(axis=0) + radius + 1
# calculate the slices
origin = [None] * ndim
slices = [None] * ndim
for i, sh, low, up in zip(range(ndim), shape, lower, upper):
lower_bound_trunc = max(0, low)
upper_bound_trunc = min(sh, up)
slices[i] = slice(int(round(lower_bound_trunc)),
int(round(upper_bound_trunc)))
origin[i] = lower_bound_trunc
return tuple(slices), origin | [
"def",
"get_slice",
"(",
"coords",
",",
"shape",
",",
"radius",
")",
":",
"# interpret parameters",
"ndim",
"=",
"len",
"(",
"shape",
")",
"radius",
"=",
"validate_tuple",
"(",
"radius",
",",
"ndim",
")",
"coords",
"=",
"np",
".",
"atleast_2d",
"(",
"np"... | https://github.com/soft-matter/trackpy/blob/a316c658ffd03d4b6fe705b9bedd63c1ab8276c0/trackpy/masks.py#L96-L121 | |
veusz/veusz | 5a1e2af5f24df0eb2a2842be51f2997c4999c7fb | veusz/dialogs/preferences.py | python | PreferencesDialog.securityDirAddClicked | (self) | Add a secure directory. | Add a secure directory. | [
"Add",
"a",
"secure",
"directory",
"."
] | def securityDirAddClicked(self):
"""Add a secure directory."""
dirname = qt.QFileDialog.getExistingDirectory(
self, _('Choose secure directory to add'))
if dirname:
self.securityDirList.addItem(dirname) | [
"def",
"securityDirAddClicked",
"(",
"self",
")",
":",
"dirname",
"=",
"qt",
".",
"QFileDialog",
".",
"getExistingDirectory",
"(",
"self",
",",
"_",
"(",
"'Choose secure directory to add'",
")",
")",
"if",
"dirname",
":",
"self",
".",
"securityDirList",
".",
"... | https://github.com/veusz/veusz/blob/5a1e2af5f24df0eb2a2842be51f2997c4999c7fb/veusz/dialogs/preferences.py#L332-L338 | ||
EnterpriseDB/barman | 487bad92edec72712531ead4746fad72bb310270 | barman/server.py | python | Server.write_sync_wals_info_file | (self, primary_info) | Write the content of SYNC_WALS_INFO_FILE on disk
:param dict primary_info: | Write the content of SYNC_WALS_INFO_FILE on disk | [
"Write",
"the",
"content",
"of",
"SYNC_WALS_INFO_FILE",
"on",
"disk"
] | def write_sync_wals_info_file(self, primary_info):
"""
Write the content of SYNC_WALS_INFO_FILE on disk
:param dict primary_info:
"""
try:
with open(
os.path.join(self.config.wals_directory, SYNC_WALS_INFO_FILE), "w"
) as syncfile:
syncfile.write(
"%s\t%s"
% (primary_info["last_name"], primary_info["last_position"])
)
except (OSError, IOError):
# Wrap file access exceptions using SyncError
raise SyncError(
"Unable to write %s file for server %s"
% (SYNC_WALS_INFO_FILE, self.config.name)
) | [
"def",
"write_sync_wals_info_file",
"(",
"self",
",",
"primary_info",
")",
":",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config",
".",
"wals_directory",
",",
"SYNC_WALS_INFO_FILE",
")",
",",
"\"w\"",
")",
"as",
... | https://github.com/EnterpriseDB/barman/blob/487bad92edec72712531ead4746fad72bb310270/barman/server.py#L3857-L3876 | ||
awslabs/aws-lambda-powertools-python | 0c6ac0fe183476140ee1df55fe9fa1cc20925577 | aws_lambda_powertools/utilities/data_classes/api_gateway_proxy_event.py | python | RequestContextV2AuthorizerIam.user_arn | (self) | return self.get("userArn") | The Amazon Resource Name (ARN) of the effective user identified after authentication. | The Amazon Resource Name (ARN) of the effective user identified after authentication. | [
"The",
"Amazon",
"Resource",
"Name",
"(",
"ARN",
")",
"of",
"the",
"effective",
"user",
"identified",
"after",
"authentication",
"."
] | def user_arn(self) -> Optional[str]:
"""The Amazon Resource Name (ARN) of the effective user identified after authentication."""
return self.get("userArn") | [
"def",
"user_arn",
"(",
"self",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"return",
"self",
".",
"get",
"(",
"\"userArn\"",
")"
] | https://github.com/awslabs/aws-lambda-powertools-python/blob/0c6ac0fe183476140ee1df55fe9fa1cc20925577/aws_lambda_powertools/utilities/data_classes/api_gateway_proxy_event.py#L150-L152 | |
jgagneastro/coffeegrindsize | 22661ebd21831dba4cf32bfc6ba59fe3d49f879c | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/spatial/transform/rotation.py | python | Rotation.apply | (self, vectors, inverse=False) | Apply this rotation to a set of vectors.
If the original frame rotates to the final frame by this rotation, then
its application to a vector can be seen in two ways:
- As a projection of vector components expressed in the final frame
to the original frame.
- As the physical rotation of a vector being glued to the original
frame as it rotates. In this case the vector components are
expressed in the original frame before and after the rotation.
In terms of DCMs, this application is the same as
``self.as_dcm().dot(vectors)``.
Parameters
----------
vectors : array_like, shape (3,) or (N, 3)
Each `vectors[i]` represents a vector in 3D space. A single vector
can either be specified with shape `(3, )` or `(1, 3)`. The number
of rotations and number of vectors given must follow standard numpy
broadcasting rules: either one of them equals unity or they both
equal each other.
inverse : boolean, optional
If True then the inverse of the rotation(s) is applied to the input
vectors. Default is False.
Returns
-------
rotated_vectors : ndarray, shape (3,) or (N, 3)
Result of applying rotation on input vectors.
Shape depends on the following cases:
- If object contains a single rotation (as opposed to a stack
with a single rotation) and a single vector is specified with
shape ``(3,)``, then `rotated_vectors` has shape ``(3,)``.
- In all other cases, `rotated_vectors` has shape ``(N, 3)``,
where ``N`` is either the number of rotations or vectors.
Examples
--------
>>> from scipy.spatial.transform import Rotation as R
Single rotation applied on a single vector:
>>> vector = np.array([1, 0, 0])
>>> r = R.from_rotvec([0, 0, np.pi/2])
>>> r.as_dcm()
array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00],
[ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
>>> r.apply(vector)
array([2.22044605e-16, 1.00000000e+00, 0.00000000e+00])
>>> r.apply(vector).shape
(3,)
Single rotation applied on multiple vectors:
>>> vectors = np.array([
... [1, 0, 0],
... [1, 2, 3]])
>>> r = R.from_rotvec([0, 0, np.pi/4])
>>> r.as_dcm()
array([[ 0.70710678, -0.70710678, 0. ],
[ 0.70710678, 0.70710678, 0. ],
[ 0. , 0. , 1. ]])
>>> r.apply(vectors)
array([[ 0.70710678, 0.70710678, 0. ],
[-0.70710678, 2.12132034, 3. ]])
>>> r.apply(vectors).shape
(2, 3)
Multiple rotations on a single vector:
>>> r = R.from_rotvec([[0, 0, np.pi/4], [np.pi/2, 0, 0]])
>>> vector = np.array([1,2,3])
>>> r.as_dcm()
array([[[ 7.07106781e-01, -7.07106781e-01, 0.00000000e+00],
[ 7.07106781e-01, 7.07106781e-01, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]],
[[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 2.22044605e-16, -1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 2.22044605e-16]]])
>>> r.apply(vector)
array([[-0.70710678, 2.12132034, 3. ],
[ 1. , -3. , 2. ]])
>>> r.apply(vector).shape
(2, 3)
Multiple rotations on multiple vectors. Each rotation is applied on the
corresponding vector:
>>> r = R.from_euler('zxy', [
... [0, 0, 90],
... [45, 30, 60]], degrees=True)
>>> vectors = [
... [1, 2, 3],
... [1, 0, -1]]
>>> r.apply(vectors)
array([[ 3. , 2. , -1. ],
[-0.09026039, 1.11237244, -0.86860844]])
>>> r.apply(vectors).shape
(2, 3)
It is also possible to apply the inverse rotation:
>>> r = R.from_euler('zxy', [
... [0, 0, 90],
... [45, 30, 60]], degrees=True)
>>> vectors = [
... [1, 2, 3],
... [1, 0, -1]]
>>> r.apply(vectors, inverse=True)
array([[-3. , 2. , 1. ],
[ 1.09533535, -0.8365163 , 0.3169873 ]]) | Apply this rotation to a set of vectors. | [
"Apply",
"this",
"rotation",
"to",
"a",
"set",
"of",
"vectors",
"."
] | def apply(self, vectors, inverse=False):
"""Apply this rotation to a set of vectors.
If the original frame rotates to the final frame by this rotation, then
its application to a vector can be seen in two ways:
- As a projection of vector components expressed in the final frame
to the original frame.
- As the physical rotation of a vector being glued to the original
frame as it rotates. In this case the vector components are
expressed in the original frame before and after the rotation.
In terms of DCMs, this application is the same as
``self.as_dcm().dot(vectors)``.
Parameters
----------
vectors : array_like, shape (3,) or (N, 3)
Each `vectors[i]` represents a vector in 3D space. A single vector
can either be specified with shape `(3, )` or `(1, 3)`. The number
of rotations and number of vectors given must follow standard numpy
broadcasting rules: either one of them equals unity or they both
equal each other.
inverse : boolean, optional
If True then the inverse of the rotation(s) is applied to the input
vectors. Default is False.
Returns
-------
rotated_vectors : ndarray, shape (3,) or (N, 3)
Result of applying rotation on input vectors.
Shape depends on the following cases:
- If object contains a single rotation (as opposed to a stack
with a single rotation) and a single vector is specified with
shape ``(3,)``, then `rotated_vectors` has shape ``(3,)``.
- In all other cases, `rotated_vectors` has shape ``(N, 3)``,
where ``N`` is either the number of rotations or vectors.
Examples
--------
>>> from scipy.spatial.transform import Rotation as R
Single rotation applied on a single vector:
>>> vector = np.array([1, 0, 0])
>>> r = R.from_rotvec([0, 0, np.pi/2])
>>> r.as_dcm()
array([[ 2.22044605e-16, -1.00000000e+00, 0.00000000e+00],
[ 1.00000000e+00, 2.22044605e-16, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
>>> r.apply(vector)
array([2.22044605e-16, 1.00000000e+00, 0.00000000e+00])
>>> r.apply(vector).shape
(3,)
Single rotation applied on multiple vectors:
>>> vectors = np.array([
... [1, 0, 0],
... [1, 2, 3]])
>>> r = R.from_rotvec([0, 0, np.pi/4])
>>> r.as_dcm()
array([[ 0.70710678, -0.70710678, 0. ],
[ 0.70710678, 0.70710678, 0. ],
[ 0. , 0. , 1. ]])
>>> r.apply(vectors)
array([[ 0.70710678, 0.70710678, 0. ],
[-0.70710678, 2.12132034, 3. ]])
>>> r.apply(vectors).shape
(2, 3)
Multiple rotations on a single vector:
>>> r = R.from_rotvec([[0, 0, np.pi/4], [np.pi/2, 0, 0]])
>>> vector = np.array([1,2,3])
>>> r.as_dcm()
array([[[ 7.07106781e-01, -7.07106781e-01, 0.00000000e+00],
[ 7.07106781e-01, 7.07106781e-01, 0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]],
[[ 1.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 0.00000000e+00, 2.22044605e-16, -1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 2.22044605e-16]]])
>>> r.apply(vector)
array([[-0.70710678, 2.12132034, 3. ],
[ 1. , -3. , 2. ]])
>>> r.apply(vector).shape
(2, 3)
Multiple rotations on multiple vectors. Each rotation is applied on the
corresponding vector:
>>> r = R.from_euler('zxy', [
... [0, 0, 90],
... [45, 30, 60]], degrees=True)
>>> vectors = [
... [1, 2, 3],
... [1, 0, -1]]
>>> r.apply(vectors)
array([[ 3. , 2. , -1. ],
[-0.09026039, 1.11237244, -0.86860844]])
>>> r.apply(vectors).shape
(2, 3)
It is also possible to apply the inverse rotation:
>>> r = R.from_euler('zxy', [
... [0, 0, 90],
... [45, 30, 60]], degrees=True)
>>> vectors = [
... [1, 2, 3],
... [1, 0, -1]]
>>> r.apply(vectors, inverse=True)
array([[-3. , 2. , 1. ],
[ 1.09533535, -0.8365163 , 0.3169873 ]])
"""
vectors = np.asarray(vectors)
if vectors.ndim > 2 or vectors.shape[-1] != 3:
raise ValueError("Expected input of shape (3,) or (P, 3), "
"got {}.".format(vectors.shape))
single_vector = False
if vectors.shape == (3,):
single_vector = True
vectors = vectors[None, :]
dcm = self.as_dcm()
if self._single:
dcm = dcm[None, :, :]
n_vectors = vectors.shape[0]
n_rotations = len(self)
if n_vectors != 1 and n_rotations != 1 and n_vectors != n_rotations:
raise ValueError("Expected equal numbers of rotations and vectors "
", or a single rotation, or a single vector, got "
"{} rotations and {} vectors.".format(
n_rotations, n_vectors))
if inverse:
result = np.einsum('ikj,ik->ij', dcm, vectors)
else:
result = np.einsum('ijk,ik->ij', dcm, vectors)
if self._single and single_vector:
return result[0]
else:
return result | [
"def",
"apply",
"(",
"self",
",",
"vectors",
",",
"inverse",
"=",
"False",
")",
":",
"vectors",
"=",
"np",
".",
"asarray",
"(",
"vectors",
")",
"if",
"vectors",
".",
"ndim",
">",
"2",
"or",
"vectors",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"3",
... | https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/spatial/transform/rotation.py#L1168-L1316 | ||
tdamdouni/Pythonista | 3e082d53b6b9b501a3c8cf3251a8ad4c8be9c2ad | markdown/markdown2pdf/reportlab/platypus/frames.py | python | Frame.addFromList | (self, drawlist, canv) | Consumes objects from the front of the list until the
frame is full. If it cannot fit one object, raises
an exception. | Consumes objects from the front of the list until the
frame is full. If it cannot fit one object, raises
an exception. | [
"Consumes",
"objects",
"from",
"the",
"front",
"of",
"the",
"list",
"until",
"the",
"frame",
"is",
"full",
".",
"If",
"it",
"cannot",
"fit",
"one",
"object",
"raises",
"an",
"exception",
"."
] | def addFromList(self, drawlist, canv):
"""Consumes objects from the front of the list until the
frame is full. If it cannot fit one object, raises
an exception."""
if self._debug: logger.debug("enter Frame.addFromlist() for frame %s" % self.id)
if self.showBoundary:
self.drawBoundary(canv)
while len(drawlist) > 0:
head = drawlist[0]
if self.add(head,canv,trySplit=0):
del drawlist[0]
else:
#leave it in the list for later
break | [
"def",
"addFromList",
"(",
"self",
",",
"drawlist",
",",
"canv",
")",
":",
"if",
"self",
".",
"_debug",
":",
"logger",
".",
"debug",
"(",
"\"enter Frame.addFromlist() for frame %s\"",
"%",
"self",
".",
"id",
")",
"if",
"self",
".",
"showBoundary",
":",
"se... | https://github.com/tdamdouni/Pythonista/blob/3e082d53b6b9b501a3c8cf3251a8ad4c8be9c2ad/markdown/markdown2pdf/reportlab/platypus/frames.py#L256-L271 | ||
IronLanguages/ironpython3 | 7a7bb2a872eeab0d1009fc8a6e24dca43f65b693 | Src/StdLib/Lib/distutils/msvc9compiler.py | python | MSVCCompiler.manifest_get_embed_info | (self, target_desc, ld_args) | return temp_manifest, mfid | [] | def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
# we want to avoid any manifest for extension modules if we can)
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid | [
"def",
"manifest_get_embed_info",
"(",
"self",
",",
"target_desc",
",",
"ld_args",
")",
":",
"# If a manifest should be embedded, return a tuple of",
"# (manifest_filename, resource_id). Returns None if no manifest",
"# should be embedded. See http://bugs.python.org/issue7833 for why",
"... | https://github.com/IronLanguages/ironpython3/blob/7a7bb2a872eeab0d1009fc8a6e24dca43f65b693/Src/StdLib/Lib/distutils/msvc9compiler.py#L672-L694 | |||
ukdtom/ExportTools.bundle | 49aba4292a2897f640162a833c2792480aa4f0b6 | Contents/Code/output.py | python | setMax | (Max) | Set vars and file for status | Set vars and file for status | [
"Set",
"vars",
"and",
"file",
"for",
"status"
] | def setMax(Max):
''' Set vars and file for status '''
global iMax
iMax = Max
global iCurrent
iCurrent = 0
global CurStatusFile
CurStatusFile = getStatusFileName()
try:
io.open(CurStatusFile, 'a').close()
except Exception, e:
# Failback to utf8 if encoding cant be found
io.open(CurStatusFile, 'a', encoding='utf8').close() | [
"def",
"setMax",
"(",
"Max",
")",
":",
"global",
"iMax",
"iMax",
"=",
"Max",
"global",
"iCurrent",
"iCurrent",
"=",
"0",
"global",
"CurStatusFile",
"CurStatusFile",
"=",
"getStatusFileName",
"(",
")",
"try",
":",
"io",
".",
"open",
"(",
"CurStatusFile",
",... | https://github.com/ukdtom/ExportTools.bundle/blob/49aba4292a2897f640162a833c2792480aa4f0b6/Contents/Code/output.py#L35-L47 | ||
realpython/book2-exercises | cde325eac8e6d8cff2316601c2e5b36bb46af7d0 | web2py/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py | python | WorkingSet._build_from_requirements | (cls, req_spec) | return ws | Build a working set from a requirement spec. Rewrites sys.path. | Build a working set from a requirement spec. Rewrites sys.path. | [
"Build",
"a",
"working",
"set",
"from",
"a",
"requirement",
"spec",
".",
"Rewrites",
"sys",
".",
"path",
"."
] | def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws | [
"def",
"_build_from_requirements",
"(",
"cls",
",",
"req_spec",
")",
":",
"# try it without defaults already on sys.path",
"# by starting with an empty path",
"ws",
"=",
"cls",
"(",
"[",
"]",
")",
"reqs",
"=",
"parse_requirements",
"(",
"req_spec",
")",
"dists",
"=",
... | https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py/venv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L667-L686 | |
matplotlib/matplotlib | 8d7a2b9d2a38f01ee0d6802dd4f9e98aec812322 | lib/mpl_toolkits/axes_grid1/anchored_artists.py | python | AnchoredAuxTransformBox.__init__ | (self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs) | An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : str
Location of this artist. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center, 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
pad : float, default: 0.4
Padding around the child objects, in fraction of the font size.
borderpad : float, default: 0.5
Border padding, in fraction of the font size.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, default: True
If True, draw a box around this artists.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc='upper left')
>>> el = Ellipse((0, 0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box) | An anchored container with transformed coordinates. | [
"An",
"anchored",
"container",
"with",
"transformed",
"coordinates",
"."
] | def __init__(self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
"""
An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : str
Location of this artist. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center, 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
pad : float, default: 0.4
Padding around the child objects, in fraction of the font size.
borderpad : float, default: 0.5
Border padding, in fraction of the font size.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, default: True
If True, draw a box around this artists.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc='upper left')
>>> el = Ellipse((0, 0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box)
"""
self.drawing_area = AuxTransformBox(transform)
super().__init__(loc, pad=pad, borderpad=borderpad,
child=self.drawing_area, prop=prop, frameon=frameon,
**kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"transform",
",",
"loc",
",",
"pad",
"=",
"0.4",
",",
"borderpad",
"=",
"0.5",
",",
"prop",
"=",
"None",
",",
"frameon",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"drawing_area",
"=",
"AuxTran... | https://github.com/matplotlib/matplotlib/blob/8d7a2b9d2a38f01ee0d6802dd4f9e98aec812322/lib/mpl_toolkits/axes_grid1/anchored_artists.py#L73-L124 | ||
mozillazg/pypy | 2ff5cd960c075c991389f842c6d59e71cf0cb7d0 | lib-python/2.7/difflib.py | python | SequenceMatcher.set_seq1 | (self, a) | Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2(). | Set the first sequence to be compared. | [
"Set",
"the",
"first",
"sequence",
"to",
"be",
"compared",
"."
] | def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None | [
"def",
"set_seq1",
"(",
"self",
",",
"a",
")",
":",
"if",
"a",
"is",
"self",
".",
"a",
":",
"return",
"self",
".",
"a",
"=",
"a",
"self",
".",
"matching_blocks",
"=",
"self",
".",
"opcodes",
"=",
"None"
] | https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/difflib.py#L233-L257 | ||
taigaio/taiga-ncurses | 65312098f2d167762e0dbd1c16019754ab64d068 | taiga_ncurses/ui/widgets/backlog.py | python | MIlestoneSelectorPopup._milestone_selector | (self) | return urwid.BoxAdapter(urwid.ListBox(list_walker), 20) | [] | def _milestone_selector(self):
contents = []
for milestone in data.list_of_milestones(self.project):
option = MilestoneOptionEntry(milestone)
self.options.append(option)
contents.append(option)
contents.append(generic.box_solid_fill(" ", 1))
list_walker = urwid.SimpleFocusListWalker(contents)
if len(contents) > 0:
list_walker.set_focus(0)
return urwid.BoxAdapter(urwid.ListBox(list_walker), 20) | [
"def",
"_milestone_selector",
"(",
"self",
")",
":",
"contents",
"=",
"[",
"]",
"for",
"milestone",
"in",
"data",
".",
"list_of_milestones",
"(",
"self",
".",
"project",
")",
":",
"option",
"=",
"MilestoneOptionEntry",
"(",
"milestone",
")",
"self",
".",
"... | https://github.com/taigaio/taiga-ncurses/blob/65312098f2d167762e0dbd1c16019754ab64d068/taiga_ncurses/ui/widgets/backlog.py#L507-L519 | |||
PySimpleGUI/PySimpleGUI | 6c0d1fb54f493d45e90180b322fbbe70f7a5af3c | DemoPrograms/Demo_Uno_Card_Game.py | python | Hand.__getitem__ | (self, item) | [] | def __getitem__(self, item):
try:
return self.hand[item]
except:
return '' | [
"def",
"__getitem__",
"(",
"self",
",",
"item",
")",
":",
"try",
":",
"return",
"self",
".",
"hand",
"[",
"item",
"]",
"except",
":",
"return",
"''"
] | https://github.com/PySimpleGUI/PySimpleGUI/blob/6c0d1fb54f493d45e90180b322fbbe70f7a5af3c/DemoPrograms/Demo_Uno_Card_Game.py#L263-L267 | ||||
weecology/retriever | e5ba505f7b9958c70e60155f3c5495899da27e7e | retriever/lib/engine.py | python | Engine.get_cursor | (self) | return self._cursor | Get db cursor. | Get db cursor. | [
"Get",
"db",
"cursor",
"."
] | def get_cursor(self):
"""Get db cursor."""
if self._cursor is None:
self._cursor = self.connection.cursor()
return self._cursor | [
"def",
"get_cursor",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cursor",
"is",
"None",
":",
"self",
".",
"_cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"return",
"self",
".",
"_cursor"
] | https://github.com/weecology/retriever/blob/e5ba505f7b9958c70e60155f3c5495899da27e7e/retriever/lib/engine.py#L889-L893 | |
googledatalab/pydatalab | 1c86e26a0d24e3bc8097895ddeab4d0607be4c40 | google/datalab/contrib/pipeline/_pipeline.py | python | PipelineGenerator._get_operator_class_name | (task_detail_type) | return operator_class_name, module | Internal helper gets the name of the Airflow operator class. We maintain
this in a map, so this method really returns the enum name, concatenated
with the string "Operator". | Internal helper gets the name of the Airflow operator class. We maintain
this in a map, so this method really returns the enum name, concatenated
with the string "Operator". | [
"Internal",
"helper",
"gets",
"the",
"name",
"of",
"the",
"Airflow",
"operator",
"class",
".",
"We",
"maintain",
"this",
"in",
"a",
"map",
"so",
"this",
"method",
"really",
"returns",
"the",
"enum",
"name",
"concatenated",
"with",
"the",
"string",
"Operator"... | def _get_operator_class_name(task_detail_type):
""" Internal helper gets the name of the Airflow operator class. We maintain
this in a map, so this method really returns the enum name, concatenated
with the string "Operator".
"""
# TODO(rajivpb): Rename this var correctly.
task_type_to_operator_prefix_mapping = {
'pydatalab.bq.execute': ('Execute',
'google.datalab.contrib.bigquery.operators._bq_execute_operator'),
'pydatalab.bq.extract': ('Extract',
'google.datalab.contrib.bigquery.operators._bq_extract_operator'),
'pydatalab.bq.load': ('Load', 'google.datalab.contrib.bigquery.operators._bq_load_operator'),
'Bash': ('Bash', 'airflow.operators.bash_operator')
}
(operator_class_prefix, module) = task_type_to_operator_prefix_mapping.get(
task_detail_type, (None, __name__))
format_string = '{0}Operator'
operator_class_name = format_string.format(operator_class_prefix)
if operator_class_prefix is None:
return format_string.format(task_detail_type), module
return operator_class_name, module | [
"def",
"_get_operator_class_name",
"(",
"task_detail_type",
")",
":",
"# TODO(rajivpb): Rename this var correctly.",
"task_type_to_operator_prefix_mapping",
"=",
"{",
"'pydatalab.bq.execute'",
":",
"(",
"'Execute'",
",",
"'google.datalab.contrib.bigquery.operators._bq_execute_operator'... | https://github.com/googledatalab/pydatalab/blob/1c86e26a0d24e3bc8097895ddeab4d0607be4c40/google/datalab/contrib/pipeline/_pipeline.py#L178-L198 | |
lovelylain/pyctp | fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d | option/ctp/ApiStruct.py | python | Notice.__init__ | (self, BrokerID='', Content='', SequenceLabel='') | [] | def __init__(self, BrokerID='', Content='', SequenceLabel=''):
self.BrokerID = '' #经纪公司代码, char[11]
self.Content = '' #消息正文, char[501]
self.SequenceLabel = '' | [
"def",
"__init__",
"(",
"self",
",",
"BrokerID",
"=",
"''",
",",
"Content",
"=",
"''",
",",
"SequenceLabel",
"=",
"''",
")",
":",
"self",
".",
"BrokerID",
"=",
"''",
"#经纪公司代码, char[11]",
"self",
".",
"Content",
"=",
"''",
"#消息正文, char[501]",
"self",
".",... | https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/option/ctp/ApiStruct.py#L4765-L4768 | ||||
ansible/ansibullbot | c0a777dba16411db2ab0ce1a13eb166de2bed063 | ansibullbot/plugins/shipit.py | python | needs_community_review | (meta) | return True | Notify community for more shipits? | Notify community for more shipits? | [
"Notify",
"community",
"for",
"more",
"shipits?"
] | def needs_community_review(meta):
'''Notify community for more shipits?'''
if not meta['is_new_module']:
return False
if meta['shipit']:
return False
if meta['is_needs_revision']:
return False
if meta['is_needs_rebase']:
return False
if meta['is_needs_info']:
return False
if meta['ci_state'] == 'pending':
return False
if not meta['has_ci']:
return False
if not meta['mergeable']:
return False
mm = meta.get('module_match', {})
if not mm:
return False
if meta['component_support'] != ['community']:
return False
# expensive call done earlier in processing
if not meta['notify_community_shipit']:
return False
return True | [
"def",
"needs_community_review",
"(",
"meta",
")",
":",
"if",
"not",
"meta",
"[",
"'is_new_module'",
"]",
":",
"return",
"False",
"if",
"meta",
"[",
"'shipit'",
"]",
":",
"return",
"False",
"if",
"meta",
"[",
"'is_needs_revision'",
"]",
":",
"return",
"Fal... | https://github.com/ansible/ansibullbot/blob/c0a777dba16411db2ab0ce1a13eb166de2bed063/ansibullbot/plugins/shipit.py#L139-L177 | |
chribsen/simple-machine-learning-examples | dc94e52a4cebdc8bb959ff88b81ff8cfeca25022 | venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.py | python | matrix.getI | (self) | return asmatrix(func(self)) | Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]]) | Returns the (multiplicative) inverse of invertible `self`. | [
"Returns",
"the",
"(",
"multiplicative",
")",
"inverse",
"of",
"invertible",
"self",
"."
] | def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self)) | [
"def",
"getI",
"(",
"self",
")",
":",
"M",
",",
"N",
"=",
"self",
".",
"shape",
"if",
"M",
"==",
"N",
":",
"from",
"numpy",
".",
"dual",
"import",
"inv",
"as",
"func",
"else",
":",
"from",
"numpy",
".",
"dual",
"import",
"pinv",
"as",
"func",
"... | https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/numpy/matrixlib/defmatrix.py#L930-L972 | |
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py | python | NullProvider.metadata_isdir | (self, name) | return self.egg_info and self._isdir(self._fn(self.egg_info, name)) | [] | def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name)) | [
"def",
"metadata_isdir",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"egg_info",
"and",
"self",
".",
"_isdir",
"(",
"self",
".",
"_fn",
"(",
"self",
".",
"egg_info",
",",
"name",
")",
")"
] | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py#L1477-L1478 | |||
facebookresearch/Detectron | 1809dd41c1ffc881c0d6b1c16ea38d08894f8b6d | detectron/modeling/ResNet.py | python | add_residual_block | (
model,
prefix,
blob_in,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init=2,
inplace_sum=False
) | return model.Relu(s, s) | Add a residual block to the model. | Add a residual block to the model. | [
"Add",
"a",
"residual",
"block",
"to",
"the",
"model",
"."
] | def add_residual_block(
model,
prefix,
blob_in,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init=2,
inplace_sum=False
):
"""Add a residual block to the model."""
# prefix = res<stage>_<sub_stage>, e.g., res2_3
# Max pooling is performed prior to the first stage (which is uniquely
# distinguished by dim_in = 64), thus we keep stride = 1 for the first stage
stride = stride_init if (
dim_in != dim_out and dim_in != 64 and dilation == 1
) else 1
# transformation blob
tr = globals()[cfg.RESNETS.TRANS_FUNC](
model,
blob_in,
dim_in,
dim_out,
stride,
prefix,
dim_inner,
group=cfg.RESNETS.NUM_GROUPS,
dilation=dilation
)
# sum -> ReLU
# shortcut function: by default using bn; support gn
add_shortcut = globals()[cfg.RESNETS.SHORTCUT_FUNC]
sc = add_shortcut(model, prefix, blob_in, dim_in, dim_out, stride)
if inplace_sum:
s = model.net.Sum([tr, sc], tr)
else:
s = model.net.Sum([tr, sc], prefix + '_sum')
return model.Relu(s, s) | [
"def",
"add_residual_block",
"(",
"model",
",",
"prefix",
",",
"blob_in",
",",
"dim_in",
",",
"dim_out",
",",
"dim_inner",
",",
"dilation",
",",
"stride_init",
"=",
"2",
",",
"inplace_sum",
"=",
"False",
")",
":",
"# prefix = res<stage>_<sub_stage>, e.g., res2_3",... | https://github.com/facebookresearch/Detectron/blob/1809dd41c1ffc881c0d6b1c16ea38d08894f8b6d/detectron/modeling/ResNet.py#L153-L195 | |
batiste/django-page-cms | 8ba3fa07ecc4aab1013db457ff50a1ebe1ac4d06 | pages/settings.py | python | get_page_templates | () | The callable that is used by the CMS. | The callable that is used by the CMS. | [
"The",
"callable",
"that",
"is",
"used",
"by",
"the",
"CMS",
"."
] | def get_page_templates():
"""The callable that is used by the CMS."""
PAGE_TEMPLATES = get_setting('PAGE_TEMPLATES',
default_value=())
if isinstance(PAGE_TEMPLATES, collections.Callable):
return PAGE_TEMPLATES()
else:
return PAGE_TEMPLATES | [
"def",
"get_page_templates",
"(",
")",
":",
"PAGE_TEMPLATES",
"=",
"get_setting",
"(",
"'PAGE_TEMPLATES'",
",",
"default_value",
"=",
"(",
")",
")",
"if",
"isinstance",
"(",
"PAGE_TEMPLATES",
",",
"collections",
".",
"Callable",
")",
":",
"return",
"PAGE_TEMPLAT... | https://github.com/batiste/django-page-cms/blob/8ba3fa07ecc4aab1013db457ff50a1ebe1ac4d06/pages/settings.py#L51-L58 | ||
reviewboard/reviewboard | 7395902e4c181bcd1d633f61105012ffb1d18e1b | reviewboard/webapi/resources/review_reply_screenshot_comment.py | python | ReviewReplyScreenshotCommentResource.create | (self, request, reply_to_id, *args, **kwargs) | return self.create_or_update_comment_reply(
request=request,
comment=comment,
reply=reply,
comments_m2m=reply.screenshot_comments,
default_attrs={
'screenshot': comment.screenshot,
'x': comment.x,
'y': comment.y,
'w': comment.w,
'h': comment.h,
},
*args,
**kwargs) | Creates a reply to a screenshot comment on a review.
This will create a reply to a screenshot comment on a review.
The new comment will contain the same dimensions of the comment
being replied to, but may contain new text.
Extra data can be stored later lookup. See
:ref:`webapi2.0-extra-data` for more information. | Creates a reply to a screenshot comment on a review. | [
"Creates",
"a",
"reply",
"to",
"a",
"screenshot",
"comment",
"on",
"a",
"review",
"."
] | def create(self, request, reply_to_id, *args, **kwargs):
"""Creates a reply to a screenshot comment on a review.
This will create a reply to a screenshot comment on a review.
The new comment will contain the same dimensions of the comment
being replied to, but may contain new text.
Extra data can be stored later lookup. See
:ref:`webapi2.0-extra-data` for more information.
"""
try:
resources.review_request.get_object(request, *args, **kwargs)
reply = resources.review_reply.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not resources.review_reply.has_modify_permissions(request, reply):
return self.get_no_access_error(request)
try:
comment = resources.review_screenshot_comment.get_object(
request,
comment_id=reply_to_id,
*args, **kwargs)
except ObjectDoesNotExist:
return INVALID_FORM_DATA, {
'fields': {
'reply_to_id': ['This is not a valid screenshot '
'comment ID'],
}
}
return self.create_or_update_comment_reply(
request=request,
comment=comment,
reply=reply,
comments_m2m=reply.screenshot_comments,
default_attrs={
'screenshot': comment.screenshot,
'x': comment.x,
'y': comment.y,
'w': comment.w,
'h': comment.h,
},
*args,
**kwargs) | [
"def",
"create",
"(",
"self",
",",
"request",
",",
"reply_to_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"resources",
".",
"review_request",
".",
"get_object",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
... | https://github.com/reviewboard/reviewboard/blob/7395902e4c181bcd1d633f61105012ffb1d18e1b/reviewboard/webapi/resources/review_reply_screenshot_comment.py#L57-L102 | |
beeware/ouroboros | a29123c6fab6a807caffbb7587cf548e0c370296 | ouroboros/email/parser.py | python | BytesParser.__init__ | (self, *args, **kw) | Parser of binary RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The input must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceeded by a `Unix-from' header. The
header block is terminated either by the end of the input or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message. | Parser of binary RFC 2822 and MIME email messages. | [
"Parser",
"of",
"binary",
"RFC",
"2822",
"and",
"MIME",
"email",
"messages",
"."
] | def __init__(self, *args, **kw):
"""Parser of binary RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The input must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceeded by a `Unix-from' header. The
header block is terminated either by the end of the input or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
self.parser = Parser(*args, **kw) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"parser",
"=",
"Parser",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/email/parser.py#L82-L98 | ||
gwastro/pycbc | 1e1c85534b9dba8488ce42df693230317ca63dea | pycbc/psd/__init__.py | python | insert_psd_option_group_multi_ifo | (parser) | return psd_options | Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance. | Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code. | [
"Adds",
"the",
"options",
"used",
"to",
"call",
"the",
"pycbc",
".",
"psd",
".",
"from_cli",
"function",
"to",
"an",
"optparser",
"as",
"an",
"OptionGroup",
".",
"This",
"should",
"be",
"used",
"if",
"you",
"want",
"to",
"use",
"these",
"options",
"in",
... | def insert_psd_option_group_multi_ifo(parser):
"""
Adds the options used to call the pycbc.psd.from_cli function to an
optparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
"""
psd_options = parser.add_argument_group(
"Options to select the method of PSD generation",
"The options --psd-model, --psd-file, --asd-file, "
"and --psd-estimation are mutually exclusive.")
psd_options.add_argument("--psd-model", nargs="+",
action=MultiDetOptionAction, metavar='IFO:MODEL',
help="Get PSD from given analytical model. "
"Choose from %s" %(', '.join(get_psd_model_list()),))
psd_options.add_argument("--psd-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Get PSD using given PSD ASCII file")
psd_options.add_argument("--asd-file", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Get PSD using given ASD ASCII file")
psd_options.add_argument("--psd-estimation", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="Measure PSD from the data, using given "
"average method. Choose from "
"mean, median or median-mean.")
psd_options.add_argument("--psd-segment-length", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:LENGTH',
help="(Required for --psd-estimation) The segment "
"length for PSD estimation (s)")
psd_options.add_argument("--psd-segment-stride", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:STRIDE',
help="(Required for --psd-estimation) The separation"
" between consecutive segments (s)")
psd_options.add_argument("--psd-num-segments", type=int, nargs="+",
default=None,
action=MultiDetOptionAction, metavar='IFO:NUM',
help="(Optional, used only with --psd-estimation). "
"If given PSDs will be estimated using only "
"this number of segments. If more data is "
"given than needed to make this number of "
"segments than excess data will not be used in "
"the PSD estimate. If not enough data is given "
"the code will fail.")
psd_options.add_argument("--psd-inverse-length", type=float, nargs="+",
action=MultiDetOptionAction, metavar='IFO:LENGTH',
help="(Optional) The maximum length of the impulse"
" response of the overwhitening filter (s)")
psd_options.add_argument("--invpsd-trunc-method", default=None,
choices=["hann"],
help="(Optional) What truncation method to use "
"when applying psd-inverse-length. If not "
"provided, a hard truncation will be used.")
psd_options.add_argument("--psd-output", nargs="+",
action=MultiDetOptionAction, metavar='IFO:FILE',
help="(Optional) Write PSD to specified file")
# Options for PSD variation
psd_options.add_argument("--psdvar-segment", type=float,
metavar="SECONDS", help="Length of segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar-short-segment", type=float,
metavar="SECONDS", help="Length of short segment "
"for outliers removal in PSD variability "
"calculation.")
psd_options.add_argument("--psdvar-long-segment", type=float,
metavar="SECONDS", help="Length of long segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar-psd-duration", type=float,
metavar="SECONDS", help="Duration of short "
"segments for PSD estimation.")
psd_options.add_argument("--psdvar-psd-stride", type=float,
metavar="SECONDS", help="Separation between PSD "
"estimation segments.")
psd_options.add_argument("--psdvar-low-freq", type=float, metavar="HERTZ",
help="Minimum frequency to consider in strain "
"bandpass.")
psd_options.add_argument("--psdvar-high-freq", type=float, metavar="HERTZ",
help="Maximum frequency to consider in strain "
"bandpass.")
return psd_options | [
"def",
"insert_psd_option_group_multi_ifo",
"(",
"parser",
")",
":",
"psd_options",
"=",
"parser",
".",
"add_argument_group",
"(",
"\"Options to select the method of PSD generation\"",
",",
"\"The options --psd-model, --psd-file, --asd-file, \"",
"\"and --psd-estimation are mutually ex... | https://github.com/gwastro/pycbc/blob/1e1c85534b9dba8488ce42df693230317ca63dea/pycbc/psd/__init__.py#L263-L348 | |
misterch0c/shadowbroker | e3a069bea47a2c1009697941ac214adc6f90aa8d | windows/Resources/Python/Core/Lib/logging/__init__.py | python | Filterer.removeFilter | (self, filter) | Remove the specified filter from this handler. | Remove the specified filter from this handler. | [
"Remove",
"the",
"specified",
"filter",
"from",
"this",
"handler",
"."
] | def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter) | [
"def",
"removeFilter",
"(",
"self",
",",
"filter",
")",
":",
"if",
"filter",
"in",
"self",
".",
"filters",
":",
"self",
".",
"filters",
".",
"remove",
"(",
"filter",
")"
] | https://github.com/misterch0c/shadowbroker/blob/e3a069bea47a2c1009697941ac214adc6f90aa8d/windows/Resources/Python/Core/Lib/logging/__init__.py#L499-L504 | ||
rballester/tntorch | d255773632a5da6de4092798d9e58854abda3082 | tntorch/tensor.py | python | Tensor.ranks_tt | (self) | return torch.tensor([first] + [c.shape[-1] for c in self.cores]) | Returns the TT ranks of this tensor.
:return: a vector of integers | Returns the TT ranks of this tensor. | [
"Returns",
"the",
"TT",
"ranks",
"of",
"this",
"tensor",
"."
] | def ranks_tt(self):
"""
Returns the TT ranks of this tensor.
:return: a vector of integers
"""
if self.batch:
d1 = 3
d2 = 2
d3 = 1
else:
d1 = 2
d2 = 1
d3 = 0
if self.cores[0].dim() == d1:
first = self.cores[0].shape[d2]
else:
first = self.cores[0].shape[d3]
return torch.tensor([first] + [c.shape[-1] for c in self.cores]) | [
"def",
"ranks_tt",
"(",
"self",
")",
":",
"if",
"self",
".",
"batch",
":",
"d1",
"=",
"3",
"d2",
"=",
"2",
"d3",
"=",
"1",
"else",
":",
"d1",
"=",
"2",
"d2",
"=",
"1",
"d3",
"=",
"0",
"if",
"self",
".",
"cores",
"[",
"0",
"]",
".",
"dim",... | https://github.com/rballester/tntorch/blob/d255773632a5da6de4092798d9e58854abda3082/tntorch/tensor.py#L642-L663 | |
stellargraph/stellargraph | 3c2c8c18ab4c5c16660f350d8e23d7dc39e738de | stellargraph/core/graph.py | python | StellarGraph.node_feature_sizes | (self, node_types=None) | return self._feature_sizes(self._nodes, node_types, "node") | Get the feature sizes for the specified node types.
.. seealso:: :meth:`node_feature_shapes`
Args:
node_types (list, optional): A list of node types. If None all current node types
will be used.
Returns:
A dictionary of node type and integer feature size. | Get the feature sizes for the specified node types. | [
"Get",
"the",
"feature",
"sizes",
"for",
"the",
"specified",
"node",
"types",
"."
] | def node_feature_sizes(self, node_types=None):
"""
Get the feature sizes for the specified node types.
.. seealso:: :meth:`node_feature_shapes`
Args:
node_types (list, optional): A list of node types. If None all current node types
will be used.
Returns:
A dictionary of node type and integer feature size.
"""
return self._feature_sizes(self._nodes, node_types, "node") | [
"def",
"node_feature_sizes",
"(",
"self",
",",
"node_types",
"=",
"None",
")",
":",
"return",
"self",
".",
"_feature_sizes",
"(",
"self",
".",
"_nodes",
",",
"node_types",
",",
"\"node\"",
")"
] | https://github.com/stellargraph/stellargraph/blob/3c2c8c18ab4c5c16660f350d8e23d7dc39e738de/stellargraph/core/graph.py#L1128-L1141 | |
facebookresearch/FashionPlus | 5b992c3c547d4b24e64d9c09052125c9d4c3812b | classification/data_dict/shape_and_feature/update_demo.py | python | InputFeature.overwrite_feature | (self, target_feature, partID, mode) | Overwrites shape and/or texture feature in partID with target_feature
Args: target_feature (numpy array), overwrite the feature with values in target_feature
partID (int), which part of feature to be overwritten
mode (str), one of "shape_only", "texture_only", "shape_and_texture",
specifying which component of the part feature to be overwritten | Overwrites shape and/or texture feature in partID with target_feature
Args: target_feature (numpy array), overwrite the feature with values in target_feature
partID (int), which part of feature to be overwritten
mode (str), one of "shape_only", "texture_only", "shape_and_texture",
specifying which component of the part feature to be overwritten | [
"Overwrites",
"shape",
"and",
"/",
"or",
"texture",
"feature",
"in",
"partID",
"with",
"target_feature",
"Args",
":",
"target_feature",
"(",
"numpy",
"array",
")",
"overwrite",
"the",
"feature",
"with",
"values",
"in",
"target_feature",
"partID",
"(",
"int",
"... | def overwrite_feature(self, target_feature, partID, mode):
'''Overwrites shape and/or texture feature in partID with target_feature
Args: target_feature (numpy array), overwrite the feature with values in target_feature
partID (int), which part of feature to be overwritten
mode (str), one of "shape_only", "texture_only", "shape_and_texture",
specifying which component of the part feature to be overwritten
'''
if mode == 'shape_only':
self.feature[partID * (self.texture_feat_num + self.shape_feat_num) + self.texture_feat_num: \
(partID + 1) * (self.texture_feat_num + self.shape_feat_num)] = target_feature
elif mode == 'texture_only':
self.feature[partID * (self.texture_feat_num + self.shape_feat_num): \
partID * (self.texture_feat_num + self.shape_feat_num) + self.texture_feat_num] = target_feature
elif mode == 'shape_and_texture':
self.feature[partID * (self.texture_feat_num + self.shape_feat_num): \
(partID + 1) * (self.texture_feat_num + self.shape_feat_num)] = target_feature
else:
raise NotImplementedError | [
"def",
"overwrite_feature",
"(",
"self",
",",
"target_feature",
",",
"partID",
",",
"mode",
")",
":",
"if",
"mode",
"==",
"'shape_only'",
":",
"self",
".",
"feature",
"[",
"partID",
"*",
"(",
"self",
".",
"texture_feat_num",
"+",
"self",
".",
"shape_feat_n... | https://github.com/facebookresearch/FashionPlus/blob/5b992c3c547d4b24e64d9c09052125c9d4c3812b/classification/data_dict/shape_and_feature/update_demo.py#L112-L129 | ||
Damian89/extended-xss-search | aacd6083c9ac8e9c4b9a0e2ef62763a55f3c7ac6 | inc/Reflection.py | python | Reflection.set_found_string | (self, paramdata, search_value, value_of_finding) | [] | def set_found_string(self, paramdata, search_value, value_of_finding):
self.found = self.found + "Found with value {} %: [{}] [{}={}]\nURL: {}".format(
int(100 * value_of_finding),
self.data["method"],
paramdata,
search_value,
self.data["url"]
)
if self.data["method"] == "POST":
self.found = "{}\n\nPayload: {}\n\n".format(self.found, self.data["body"])
if self.data["method"] == "GET":
self.found = "{}\n\nPayload: {}\n\n".format(self.found, self.data["path"]) | [
"def",
"set_found_string",
"(",
"self",
",",
"paramdata",
",",
"search_value",
",",
"value_of_finding",
")",
":",
"self",
".",
"found",
"=",
"self",
".",
"found",
"+",
"\"Found with value {} %: [{}] [{}={}]\\nURL: {}\"",
".",
"format",
"(",
"int",
"(",
"100",
"*... | https://github.com/Damian89/extended-xss-search/blob/aacd6083c9ac8e9c4b9a0e2ef62763a55f3c7ac6/inc/Reflection.py#L102-L113 | ||||
bbfamily/abu | 2de85ae57923a720dac99a545b4f856f6b87304b | abupy/ExtBu/joblib/parallel.py | python | cpu_count | () | return mp.cpu_count() | Return the number of CPUs. | Return the number of CPUs. | [
"Return",
"the",
"number",
"of",
"CPUs",
"."
] | def cpu_count():
"""Return the number of CPUs."""
if mp is None:
return 1
return mp.cpu_count() | [
"def",
"cpu_count",
"(",
")",
":",
"if",
"mp",
"is",
"None",
":",
"return",
"1",
"return",
"mp",
".",
"cpu_count",
"(",
")"
] | https://github.com/bbfamily/abu/blob/2de85ae57923a720dac99a545b4f856f6b87304b/abupy/ExtBu/joblib/parallel.py#L140-L144 | |
crits/crits_services | c7abf91f1865d913cffad4b966599da204f8ae43 | preview_service/forms.py | python | previewConfigForm.__init__ | (self, *args, **kwargs) | [] | def __init__(self, *args, **kwargs):
kwargs.setdefault('label_suffix', ':')
super(previewConfigForm, self).__init__(*args, **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'label_suffix'",
",",
"':'",
")",
"super",
"(",
"previewConfigForm",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"*"... | https://github.com/crits/crits_services/blob/c7abf91f1865d913cffad4b966599da204f8ae43/preview_service/forms.py#L18-L20 | ||||
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_volume.py | python | Utils.openshift_installed | () | return rpmquery.count() > 0 | check if openshift is installed | check if openshift is installed | [
"check",
"if",
"openshift",
"is",
"installed"
] | def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0 | [
"def",
"openshift_installed",
"(",
")",
":",
"import",
"rpm",
"transaction_set",
"=",
"rpm",
".",
"TransactionSet",
"(",
")",
"rpmquery",
"=",
"transaction_set",
".",
"dbMatch",
"(",
"\"name\"",
",",
"\"atomic-openshift\"",
")",
"return",
"rpmquery",
".",
"count... | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_volume.py#L1361-L1368 | |
pantsbuild/pex | 473c6ac732ed4bc338b4b20a9ec930d1d722c9b4 | pex/interpreter.py | python | PythonIdentity._find_interpreter_name | (cls, python_tag) | [] | def _find_interpreter_name(cls, python_tag):
for abbr, interpreter in cls.ABBR_TO_INTERPRETER_NAME.items():
if python_tag.startswith(abbr):
return interpreter
raise ValueError("Unknown interpreter: {}".format(python_tag)) | [
"def",
"_find_interpreter_name",
"(",
"cls",
",",
"python_tag",
")",
":",
"for",
"abbr",
",",
"interpreter",
"in",
"cls",
".",
"ABBR_TO_INTERPRETER_NAME",
".",
"items",
"(",
")",
":",
"if",
"python_tag",
".",
"startswith",
"(",
"abbr",
")",
":",
"return",
... | https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/interpreter.py#L153-L157 | ||||
runawayhorse001/LearningApacheSpark | 67f3879dce17553195f094f5728b94a01badcf24 | pyspark/taskcontext.py | python | _load_from_socket | (port, auth_secret) | return res | Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed. | Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed. | [
"Load",
"data",
"from",
"a",
"given",
"socket",
"this",
"is",
"a",
"blocking",
"method",
"thus",
"only",
"return",
"when",
"the",
"socket",
"connection",
"has",
"been",
"closed",
"."
] | def _load_from_socket(port, auth_secret):
"""
Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed.
"""
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The barrier() call may block forever, so no timeout
sock.settimeout(None)
# Make a barrier() function call.
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()
# Collect result.
res = UTF8Deserializer().loads(sockfile)
# Release resources.
sockfile.close()
sock.close()
return res | [
"def",
"_load_from_socket",
"(",
"port",
",",
"auth_secret",
")",
":",
"(",
"sockfile",
",",
"sock",
")",
"=",
"local_connect_and_auth",
"(",
"port",
",",
"auth_secret",
")",
"# The barrier() call may block forever, so no timeout",
"sock",
".",
"settimeout",
"(",
"N... | https://github.com/runawayhorse001/LearningApacheSpark/blob/67f3879dce17553195f094f5728b94a01badcf24/pyspark/taskcontext.py#L107-L126 | |
markovmodel/PyEMMA | e9d08d715dde17ceaa96480a9ab55d5e87d3a4b3 | pyemma/_version.py | python | versions_from_parentdir | (parentdir_prefix, root, verbose) | Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory | Try to determine the version from the parent directory name. | [
"Try",
"to",
"determine",
"the",
"version",
"from",
"the",
"parent",
"directory",
"name",
"."
] | def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix") | [
"def",
"versions_from_parentdir",
"(",
"parentdir_prefix",
",",
"root",
",",
"verbose",
")",
":",
"rootdirs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"if",
"d... | https://github.com/markovmodel/PyEMMA/blob/e9d08d715dde17ceaa96480a9ab55d5e87d3a4b3/pyemma/_version.py#L107-L129 | ||
kindredresearch/SenseAct | e7acbeb7918d0069ea82f0bab09ecb99807fdc9b | senseact/devices/sim_double_pendulum/gym_simulator.py | python | GymSimulator.update_from_gym | (self, robot_buffer_list, gym_buffer, obs_index) | Reads most recent observations from gym buffer and updates the rl_buffer.
Args:
robot_buffer_list: buffer used to update the observations for a sensor
at a given timestep sensor_dt.
gym_buffer: buffer used to publish the observations at a given timestep
gym_dt.
obs_index: Observation index (or list of indices) associated with the
particular sensor. | Reads most recent observations from gym buffer and updates the rl_buffer. | [
"Reads",
"most",
"recent",
"observations",
"from",
"gym",
"buffer",
"and",
"updates",
"the",
"rl_buffer",
"."
] | def update_from_gym(self, robot_buffer_list, gym_buffer, obs_index):
"""Reads most recent observations from gym buffer and updates the rl_buffer.
Args:
robot_buffer_list: buffer used to update the observations for a sensor
at a given timestep sensor_dt.
gym_buffer: buffer used to publish the observations at a given timestep
gym_dt.
obs_index: Observation index (or list of indices) associated with the
particular sensor.
"""
#read value from gym buffer
value, _, _ = gym_buffer.read()
value = np.array(value[0])
#write value to placeholder_buffer used to update observations use by rl agent
robot_buffer_list.write(value[obs_index]) | [
"def",
"update_from_gym",
"(",
"self",
",",
"robot_buffer_list",
",",
"gym_buffer",
",",
"obs_index",
")",
":",
"#read value from gym buffer",
"value",
",",
"_",
",",
"_",
"=",
"gym_buffer",
".",
"read",
"(",
")",
"value",
"=",
"np",
".",
"array",
"(",
"va... | https://github.com/kindredresearch/SenseAct/blob/e7acbeb7918d0069ea82f0bab09ecb99807fdc9b/senseact/devices/sim_double_pendulum/gym_simulator.py#L56-L71 | ||
memray/seq2seq-keyphrase-pytorch | e2cf5b031b98f5e5b2471294b911ce18a6ee1392 | pykp/data/remove_duplicates.py | python | main | () | 1. clean text, remove stopwords/punctuations
2. Treat as overlaps if title & text match>=70%
3. Build a title hashset to remove training duplicates | 1. clean text, remove stopwords/punctuations
2. Treat as overlaps if title & text match>=70%
3. Build a title hashset to remove training duplicates | [
"1",
".",
"clean",
"text",
"remove",
"stopwords",
"/",
"punctuations",
"2",
".",
"Treat",
"as",
"overlaps",
"if",
"title",
"&",
"text",
"match",
">",
"=",
"70%",
"3",
".",
"Build",
"a",
"title",
"hashset",
"to",
"remove",
"training",
"duplicates"
] | def main():
opt = init_args()
# specify for which dataset (for valid/test) we need to remove duplicate data samples from training data
if opt.datatype == 'paper':
total_num = 530631
train_dataset_name = 'kp20k_training'
test_dataset_names = ['kp20k', 'inspec', 'nus', 'semeval', 'krapivin']
train_id_field, train_title_field, train_text_field, train_keyword_field = None, 'title', 'abstract', 'keyword'
test_id_field, test_title_field, test_text_field, test_keyword_field = None, 'title', 'abstract', 'keyword'
trg_delimiter = ';'
elif opt.datatype == 'qa':
total_num = 298965
train_dataset_name = 'stackexchange_training'
test_dataset_names = ['stackexchange']
train_id_field, train_title_field, train_text_field, train_keyword_field = None, 'title', 'question', 'tags'
test_id_field, test_title_field, test_text_field, test_keyword_field = None, 'title', 'question', 'tags'
trg_delimiter = ';'
elif opt.datatype == 'mag':
total_num = 5108427
train_dataset_name = 'mag_training'
test_dataset_names = ['kp20k', 'inspec', 'nus', 'semeval', 'krapivin']
train_id_field, train_title_field, train_text_field, train_keyword_field = 'id', 'title', 'abstract', 'keywords'
test_id_field, test_title_field, test_text_field, test_keyword_field = None, 'title', 'abstract', 'keyword'
trg_delimiter = None
print("Loading training data...")
train_examples_iter = example_iterator_from_json(opt.train_file, train_dataset_name,
train_id_field, train_title_field, train_text_field,
train_keyword_field, trg_delimiter)
testsets_dict = {}
output_dir = opt.test_dataset_dir + '/%s_output/' % train_dataset_name
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Loading validation/test data...")
for test_dataset_name in test_dataset_names:
for type in ['validation', 'testing']:
test_dataset_subname = '%s_%s' % (test_dataset_name, type)
source_test_file = os.path.join(opt.test_dataset_dir, test_dataset_name, test_dataset_subname+'.json')
test_examples = list(example_iterator_from_json(source_test_file, test_dataset_subname,
test_id_field, test_title_field,
test_text_field, test_keyword_field,
trg_delimiter))
testset = {}
for test_num, test_example in enumerate(test_examples):
test_id = test_example['id']
title_tokens = text2tokens(test_example['title'])
text_tokens = text2tokens(test_example['abstract'])
# concatenate title and put it into hashtable
title_set = set(title_tokens)
text_set = set(text_tokens)
content_set = title_set | text_set
test_example['title_set'] = title_set
test_example['content_set'] = content_set
test_example['dup_train_ids'] = []
test_example['dup_train_titles'] = []
testset[test_id] = test_example
testsets_dict[test_dataset_subname] = testset
"""
1. clean text, remove stopwords/punctuations
2. Treat as overlaps if title & text match>=70%
3. Build a title hashset to remove training duplicates
"""
print("Cleaning duplicate data...")
# train_dup_filtered_file = open('%s/%s_nodup.json' % (output_dir, train_dataset_name), 'w')
# train_dup_log_file = open('%s/%s__dup.txt' % (output_dir, train_dataset_name), 'w')
global file_locks_writers
file_locks_writers = {}
for test_dataset_name in test_dataset_names:
for type in ['validation', 'testing']:
test_dataset_subname = '%s_%s' % (test_dataset_name, type)
file_locks_writers[test_dataset_subname] = (threading.Lock(), open('%s/%s__dup__%s.log'
% (output_dir, test_dataset_subname, train_dataset_name), 'w'))
global pbar, output_cache
output_cache = []
file_locks_writers['train_output'] = (threading.Lock(), open('%s/%s_nodup.json' % (output_dir, train_dataset_name), 'w'))
file_locks_writers['train_log'] = (threading.Lock(), open('%s/%s__dup.log' % (output_dir, train_dataset_name), 'w'))
title_pool = {}
pbar = tqdm(total=total_num)
_worker_loop(train_examples_iter, testsets_dict, title_pool)
global valid_num
print('Processed valid data %d/%d' % (valid_num, total_num)) | [
"def",
"main",
"(",
")",
":",
"opt",
"=",
"init_args",
"(",
")",
"# specify for which dataset (for valid/test) we need to remove duplicate data samples from training data",
"if",
"opt",
".",
"datatype",
"==",
"'paper'",
":",
"total_num",
"=",
"530631",
"train_dataset_name",... | https://github.com/memray/seq2seq-keyphrase-pytorch/blob/e2cf5b031b98f5e5b2471294b911ce18a6ee1392/pykp/data/remove_duplicates.py#L175-L269 | ||
marinho/geraldo | 868ebdce67176d9b6205cddc92476f642c783fff | site/newsite/django_1_0/django/utils/_decimal.py | python | Context.create_decimal | (self, num='0') | return d._fix(self) | Creates a new Decimal instance but using self as context. | Creates a new Decimal instance but using self as context. | [
"Creates",
"a",
"new",
"Decimal",
"instance",
"but",
"using",
"self",
"as",
"context",
"."
] | def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context."""
d = Decimal(num, context=self)
return d._fix(self) | [
"def",
"create_decimal",
"(",
"self",
",",
"num",
"=",
"'0'",
")",
":",
"d",
"=",
"Decimal",
"(",
"num",
",",
"context",
"=",
"self",
")",
"return",
"d",
".",
"_fix",
"(",
"self",
")"
] | https://github.com/marinho/geraldo/blob/868ebdce67176d9b6205cddc92476f642c783fff/site/newsite/django_1_0/django/utils/_decimal.py#L2340-L2343 | |
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_clusterrole.py | python | Utils.check_def_equal | (user_def, result_def, skip_keys=None, debug=False) | return True | Given a user defined definition, compare it with the results given back by our query. | Given a user defined definition, compare it with the results given back by our query. | [
"Given",
"a",
"user",
"defined",
"definition",
"compare",
"it",
"with",
"the",
"results",
"given",
"back",
"by",
"our",
"query",
"."
] | def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True | [
"def",
"check_def_equal",
"(",
"user_def",
",",
"result_def",
",",
"skip_keys",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"# Currently these values are autogenerated and we do not need to check them",
"skip",
"=",
"[",
"'metadata'",
",",
"'status'",
"]",
"if"... | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_clusterrole.py#L1322-L1414 | |
maqp/tfc | 4bb13da1f19671e1e723db7e8a21be58847209af | src/receiver/messages.py | python | process_message_packet | (ts: 'datetime', # Timestamp of received message packet
assembly_packet_ct: bytes, # Encrypted assembly packet
window_list: 'WindowList', # WindowList object
packet_list: 'PacketList', # PacketList object
contact_list: 'ContactList', # ContactList object
key_list: 'KeyList', # KeyList object
group_list: 'GroupList', # GroupList object
settings: 'Settings', # Settings object
file_keys: Dict[bytes, bytes], # Dictionary of file decryption keys
message_log: 'MessageLog', # MessageLog object
) | Process received message packet. | Process received message packet. | [
"Process",
"received",
"message",
"packet",
"."
] | def process_message_packet(ts: 'datetime', # Timestamp of received message packet
assembly_packet_ct: bytes, # Encrypted assembly packet
window_list: 'WindowList', # WindowList object
packet_list: 'PacketList', # PacketList object
contact_list: 'ContactList', # ContactList object
key_list: 'KeyList', # KeyList object
group_list: 'GroupList', # GroupList object
settings: 'Settings', # Settings object
file_keys: Dict[bytes, bytes], # Dictionary of file decryption keys
message_log: 'MessageLog', # MessageLog object
) -> None:
"""Process received message packet."""
command_window = window_list.get_command_window()
onion_pub_key, origin, assembly_packet_ct = separate_headers(
assembly_packet_ct, [ONION_SERVICE_PUBLIC_KEY_LENGTH, ORIGIN_HEADER_LENGTH])
if onion_pub_key == LOCAL_PUBKEY:
raise SoftError("Warning! Received packet masqueraded as a command.", window=command_window)
if origin not in [ORIGIN_USER_HEADER, ORIGIN_CONTACT_HEADER]:
raise SoftError("Error: Received packet had an invalid origin-header.", window=command_window)
assembly_packet = decrypt_assembly_packet(assembly_packet_ct, onion_pub_key, origin,
window_list, contact_list, key_list)
p_type = (FILE if assembly_packet[:ASSEMBLY_PACKET_HEADER_LENGTH].isupper() else MESSAGE)
packet = packet_list.get_packet(onion_pub_key, origin, p_type)
logging = contact_list.get_contact_by_pub_key(onion_pub_key).log_messages
try:
packet.add_packet(assembly_packet)
except SoftError:
log_masking_packets(onion_pub_key, origin, logging, settings, packet, message_log)
raise
log_masking_packets(onion_pub_key, origin, logging, settings, packet, message_log)
if packet.is_complete:
process_complete_message_packet(ts, onion_pub_key, p_type, origin, logging, packet, window_list,
contact_list, group_list, settings, message_log, file_keys) | [
"def",
"process_message_packet",
"(",
"ts",
":",
"'datetime'",
",",
"# Timestamp of received message packet",
"assembly_packet_ct",
":",
"bytes",
",",
"# Encrypted assembly packet",
"window_list",
":",
"'WindowList'",
",",
"# WindowList object",
"packet_list",
":",
"'PacketLi... | https://github.com/maqp/tfc/blob/4bb13da1f19671e1e723db7e8a21be58847209af/src/receiver/messages.py#L72-L111 | ||
PokemonGoF/PokemonGo-Bot-Desktop | 4bfa94f0183406c6a86f93645eff7abd3ad4ced8 | build/pywin/Lib/xml/sax/handler.py | python | ContentHandler.setDocumentLocator | (self, locator) | Called by the parser to give the application a locator for
locating the origin of document events.
SAX parsers are strongly encouraged (though not absolutely
required) to supply a locator: if it does so, it must supply
the locator to the application by invoking this method before
invoking any of the other methods in the DocumentHandler
interface.
The locator allows the application to determine the end
position of any document-related event, even if the parser is
not reporting an error. Typically, the application will use
this information for reporting its own errors (such as
character content that does not match an application's
business rules). The information returned by the locator is
probably not sufficient for use with a search engine.
Note that the locator will return correct information only
during the invocation of the events in this interface. The
application should not attempt to use it at any other time. | Called by the parser to give the application a locator for
locating the origin of document events. | [
"Called",
"by",
"the",
"parser",
"to",
"give",
"the",
"application",
"a",
"locator",
"for",
"locating",
"the",
"origin",
"of",
"document",
"events",
"."
] | def setDocumentLocator(self, locator):
"""Called by the parser to give the application a locator for
locating the origin of document events.
SAX parsers are strongly encouraged (though not absolutely
required) to supply a locator: if it does so, it must supply
the locator to the application by invoking this method before
invoking any of the other methods in the DocumentHandler
interface.
The locator allows the application to determine the end
position of any document-related event, even if the parser is
not reporting an error. Typically, the application will use
this information for reporting its own errors (such as
character content that does not match an application's
business rules). The information returned by the locator is
probably not sufficient for use with a search engine.
Note that the locator will return correct information only
during the invocation of the events in this interface. The
application should not attempt to use it at any other time."""
self._locator = locator | [
"def",
"setDocumentLocator",
"(",
"self",
",",
"locator",
")",
":",
"self",
".",
"_locator",
"=",
"locator"
] | https://github.com/PokemonGoF/PokemonGo-Bot-Desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/xml/sax/handler.py#L57-L78 | ||
exaile/exaile | a7b58996c5c15b3aa7b9975ac13ee8f784ef4689 | xlgui/widgets/playback.py | python | MoveMarkerMenuItem.on_parent_focus_out_event | (self, widget, event) | Cancels movement of markers | Cancels movement of markers | [
"Cancels",
"movement",
"of",
"markers"
] | def on_parent_focus_out_event(self, widget, event):
"""
Cancels movement of markers
"""
self.move_cancel() | [
"def",
"on_parent_focus_out_event",
"(",
"self",
",",
"widget",
",",
"event",
")",
":",
"self",
".",
"move_cancel",
"(",
")"
] | https://github.com/exaile/exaile/blob/a7b58996c5c15b3aa7b9975ac13ee8f784ef4689/xlgui/widgets/playback.py#L1315-L1319 | ||
ring04h/wyportmap | c4201e2313504e780a7f25238eba2a2d3223e739 | sqlalchemy/orm/collections.py | python | collection.iterator | (fn) | return fn | Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ... | Tag the method as the collection remover. | [
"Tag",
"the",
"method",
"as",
"the",
"collection",
"remover",
"."
] | def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = 'iterator'
return fn | [
"def",
"iterator",
"(",
"fn",
")",
":",
"fn",
".",
"_sa_instrument_role",
"=",
"'iterator'",
"return",
"fn"
] | https://github.com/ring04h/wyportmap/blob/c4201e2313504e780a7f25238eba2a2d3223e739/sqlalchemy/orm/collections.py#L388-L399 | |
geekan/scrapy-examples | edb1cb116bd6def65a6ef01f953b58eb43e54305 | sinanews/sinanews/pipelines.py | python | JsonWithEncodingPipeline.__init__ | (self) | [] | def __init__(self):
self.file = codecs.open('data_utf8.json', 'w', encoding='utf-8') | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"file",
"=",
"codecs",
".",
"open",
"(",
"'data_utf8.json'",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")"
] | https://github.com/geekan/scrapy-examples/blob/edb1cb116bd6def65a6ef01f953b58eb43e54305/sinanews/sinanews/pipelines.py#L19-L20 | ||||
IronLanguages/ironpython2 | 51fdedeeda15727717fb8268a805f71b06c0b9f1 | Src/StdLib/Lib/site-packages/pythonwin/pywin/framework/scriptutils.py | python | LocatePythonFile | ( fileName, bBrowseIfDir = 1 ) | return win32ui.FullPath(fileName) | Given a file name, return a fully qualified file name, or None | Given a file name, return a fully qualified file name, or None | [
"Given",
"a",
"file",
"name",
"return",
"a",
"fully",
"qualified",
"file",
"name",
"or",
"None"
] | def LocatePythonFile( fileName, bBrowseIfDir = 1 ):
" Given a file name, return a fully qualified file name, or None "
# first look for the exact file as specified
if not os.path.isfile(fileName):
# Go looking!
baseName = fileName
for path in sys.path:
fileName = os.path.abspath(os.path.join(path, baseName))
if os.path.isdir(fileName):
if bBrowseIfDir:
d=win32ui.CreateFileDialog(1, "*.py", None, 0, "Python Files (*.py)|*.py|All files|*.*")
d.SetOFNInitialDir(fileName)
rc=d.DoModal()
if rc==win32con.IDOK:
fileName = d.GetPathName()
break
else:
return None
else:
fileName = fileName + ".py"
if os.path.isfile(fileName):
break # Found it!
else: # for not broken out of
return None
return win32ui.FullPath(fileName) | [
"def",
"LocatePythonFile",
"(",
"fileName",
",",
"bBrowseIfDir",
"=",
"1",
")",
":",
"# first look for the exact file as specified",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fileName",
")",
":",
"# Go looking!",
"baseName",
"=",
"fileName",
"for",
"p... | https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/Lib/site-packages/pythonwin/pywin/framework/scriptutils.py#L590-L615 | |
juglab/n2v | a146831f28c4c6e63b983e505c7fe7071d98e048 | n2v/internals/N2V_DataGenerator.py | python | N2V_DataGenerator.load_imgs | (self, files, dims='YX') | return imgs | Helper to read a list of files. The images are not required to have same size,
but have to be of same dimensionality.
Parameters
----------
files : list(String)
List of paths to tiff-files.
dims : String, optional(default='YX')
Dimensions of the images to read. Known dimensions are: 'TZYXC'
Returns
-------
images : list(array(float))
A list of the read tif-files. The images have dimensionality 'SZYXC' or 'SYXC' | Helper to read a list of files. The images are not required to have same size,
but have to be of same dimensionality. | [
"Helper",
"to",
"read",
"a",
"list",
"of",
"files",
".",
"The",
"images",
"are",
"not",
"required",
"to",
"have",
"same",
"size",
"but",
"have",
"to",
"be",
"of",
"same",
"dimensionality",
"."
] | def load_imgs(self, files, dims='YX'):
"""
Helper to read a list of files. The images are not required to have same size,
but have to be of same dimensionality.
Parameters
----------
files : list(String)
List of paths to tiff-files.
dims : String, optional(default='YX')
Dimensions of the images to read. Known dimensions are: 'TZYXC'
Returns
-------
images : list(array(float))
A list of the read tif-files. The images have dimensionality 'SZYXC' or 'SYXC'
"""
assert 'Y' in dims and 'X' in dims, "'dims' has to contain 'X' and 'Y'."
tmp_dims = dims
for b in ['X', 'Y', 'Z', 'T', 'C']:
assert tmp_dims.count(b) <= 1, "'dims' has to contain {} at most once.".format(b)
tmp_dims = tmp_dims.replace(b, '')
assert len(tmp_dims) == 0, "Unknown dimensions in 'dims'."
if 'Z' in dims:
net_axes = 'ZYXC'
else:
net_axes = 'YXC'
move_axis_from = ()
move_axis_to = ()
for d, b in enumerate(dims):
move_axis_from += tuple([d])
if b == 'T':
move_axis_to += tuple([0])
elif b == 'C':
move_axis_to += tuple([-1])
elif b in 'XYZ':
if 'T' in dims:
move_axis_to += tuple([net_axes.index(b)+1])
else:
move_axis_to += tuple([net_axes.index(b)])
imgs = []
for f in files:
if f.endswith('.tif') or f.endswith('.tiff'):
imread = tifffile.imread
elif f.endswith('.png'):
imread = image.imread
elif f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.JPEG') or f.endswith('.JPG'):
_raise(Exception("JPEG is not supported, because it is not loss-less and breaks the pixel-wise independence assumption."))
else:
_raise("Filetype '{}' is not supported.".format(f))
img = imread(f).astype(np.float32)
assert len(img.shape) == len(dims), "Number of image dimensions doesn't match 'dims'."
img = np.moveaxis(img, move_axis_from, move_axis_to)
if not ('T' in dims):
img = img[np.newaxis]
if not ('C' in dims):
img = img[..., np.newaxis]
imgs.append(img)
return imgs | [
"def",
"load_imgs",
"(",
"self",
",",
"files",
",",
"dims",
"=",
"'YX'",
")",
":",
"assert",
"'Y'",
"in",
"dims",
"and",
"'X'",
"in",
"dims",
",",
"\"'dims' has to contain 'X' and 'Y'.\"",
"tmp_dims",
"=",
"dims",
"for",
"b",
"in",
"[",
"'X'",
",",
"'Y'"... | https://github.com/juglab/n2v/blob/a146831f28c4c6e63b983e505c7fe7071d98e048/n2v/internals/N2V_DataGenerator.py#L13-L81 | |
paulgb/simplediff | 545e377932d72c14cdfdd9663cb3a6a5ba4ae50b | python/simplediff/__init__.py | python | diff | (old, new) | Find the differences between two lists. Returns a list of pairs, where the
first value is in ['+','-','='] and represents an insertion, deletion, or
no change for that list. The second value of the pair is the list
of elements.
Params:
old the old list of immutable, comparable values (ie. a list
of strings)
new the new list of immutable, comparable values
Returns:
A list of pairs, with the first part of the pair being one of three
strings ('-', '+', '=') and the second part being a list of values from
the original old and/or new lists. The first part of the pair
corresponds to whether the list of values is a deletion, insertion, or
unchanged, respectively.
Examples:
>>> diff([1,2,3,4],[1,3,4])
[('=', [1]), ('-', [2]), ('=', [3, 4])]
>>> diff([1,2,3,4],[2,3,4,1])
[('-', [1]), ('=', [2, 3, 4]), ('+', [1])]
>>> diff('The quick brown fox jumps over the lazy dog'.split(),
... 'The slow blue cheese drips over the lazy carrot'.split())
... # doctest: +NORMALIZE_WHITESPACE
[('=', ['The']),
('-', ['quick', 'brown', 'fox', 'jumps']),
('+', ['slow', 'blue', 'cheese', 'drips']),
('=', ['over', 'the', 'lazy']),
('-', ['dog']),
('+', ['carrot'])] | Find the differences between two lists. Returns a list of pairs, where the
first value is in ['+','-','='] and represents an insertion, deletion, or
no change for that list. The second value of the pair is the list
of elements. | [
"Find",
"the",
"differences",
"between",
"two",
"lists",
".",
"Returns",
"a",
"list",
"of",
"pairs",
"where",
"the",
"first",
"value",
"is",
"in",
"[",
"+",
"-",
"=",
"]",
"and",
"represents",
"an",
"insertion",
"deletion",
"or",
"no",
"change",
"for",
... | def diff(old, new):
'''
Find the differences between two lists. Returns a list of pairs, where the
first value is in ['+','-','='] and represents an insertion, deletion, or
no change for that list. The second value of the pair is the list
of elements.
Params:
old the old list of immutable, comparable values (ie. a list
of strings)
new the new list of immutable, comparable values
Returns:
A list of pairs, with the first part of the pair being one of three
strings ('-', '+', '=') and the second part being a list of values from
the original old and/or new lists. The first part of the pair
corresponds to whether the list of values is a deletion, insertion, or
unchanged, respectively.
Examples:
>>> diff([1,2,3,4],[1,3,4])
[('=', [1]), ('-', [2]), ('=', [3, 4])]
>>> diff([1,2,3,4],[2,3,4,1])
[('-', [1]), ('=', [2, 3, 4]), ('+', [1])]
>>> diff('The quick brown fox jumps over the lazy dog'.split(),
... 'The slow blue cheese drips over the lazy carrot'.split())
... # doctest: +NORMALIZE_WHITESPACE
[('=', ['The']),
('-', ['quick', 'brown', 'fox', 'jumps']),
('+', ['slow', 'blue', 'cheese', 'drips']),
('=', ['over', 'the', 'lazy']),
('-', ['dog']),
('+', ['carrot'])]
'''
# Create a map from old values to their indices
old_index_map = dict()
for i, val in enumerate(old):
old_index_map.setdefault(val,list()).append(i)
# Find the largest substring common to old and new.
# We use a dynamic programming approach here.
#
# We iterate over each value in the `new` list, calling the
# index `inew`. At each iteration, `overlap[i]` is the
# length of the largest suffix of `old[:i]` equal to a suffix
# of `new[:inew]` (or unset when `old[i]` != `new[inew]`).
#
# At each stage of iteration, the new `overlap` (called
# `_overlap` until the original `overlap` is no longer needed)
# is built from the old one.
#
# If the length of overlap exceeds the largest substring
# seen so far (`sub_length`), we update the largest substring
# to the overlapping strings.
overlap = dict()
# `sub_start_old` is the index of the beginning of the largest overlapping
# substring in the old list. `sub_start_new` is the index of the beginning
# of the same substring in the new list. `sub_length` is the length that
# overlaps in both.
# These track the largest overlapping substring seen so far, so naturally
# we start with a 0-length substring.
sub_start_old = 0
sub_start_new = 0
sub_length = 0
for inew, val in enumerate(new):
_overlap = dict()
for iold in old_index_map.get(val,list()):
# now we are considering all values of iold such that
# `old[iold] == new[inew]`.
_overlap[iold] = (iold and overlap.get(iold - 1, 0)) + 1
if(_overlap[iold] > sub_length):
# this is the largest substring seen so far, so store its
# indices
sub_length = _overlap[iold]
sub_start_old = iold - sub_length + 1
sub_start_new = inew - sub_length + 1
overlap = _overlap
if sub_length == 0:
# If no common substring is found, we return an insert and delete...
return (old and [('-', old)] or []) + (new and [('+', new)] or [])
else:
# ...otherwise, the common substring is unchanged and we recursively
# diff the text before and after that substring
return diff(old[ : sub_start_old], new[ : sub_start_new]) + \
[('=', new[sub_start_new : sub_start_new + sub_length])] + \
diff(old[sub_start_old + sub_length : ],
new[sub_start_new + sub_length : ]) | [
"def",
"diff",
"(",
"old",
",",
"new",
")",
":",
"# Create a map from old values to their indices",
"old_index_map",
"=",
"dict",
"(",
")",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"old",
")",
":",
"old_index_map",
".",
"setdefault",
"(",
"val",
",",
... | https://github.com/paulgb/simplediff/blob/545e377932d72c14cdfdd9663cb3a6a5ba4ae50b/python/simplediff/__init__.py#L17-L110 | ||
Abjad/abjad | d0646dfbe83db3dc5ab268f76a0950712b87b7fd | abjad/rhythmtrees.py | python | RhythmTreeMixin.preprolated_duration | (self) | return self._duration | The node's preprolated_duration in pulses:
>>> node = abjad.rhythmtrees.RhythmTreeLeaf(
... preprolated_duration=1)
>>> node.preprolated_duration
Duration(1, 1)
>>> node.preprolated_duration = 2
>>> node.preprolated_duration
Duration(2, 1)
Returns int. | The node's preprolated_duration in pulses: | [
"The",
"node",
"s",
"preprolated_duration",
"in",
"pulses",
":"
] | def preprolated_duration(self):
"""
The node's preprolated_duration in pulses:
>>> node = abjad.rhythmtrees.RhythmTreeLeaf(
... preprolated_duration=1)
>>> node.preprolated_duration
Duration(1, 1)
>>> node.preprolated_duration = 2
>>> node.preprolated_duration
Duration(2, 1)
Returns int.
"""
return self._duration | [
"def",
"preprolated_duration",
"(",
"self",
")",
":",
"return",
"self",
".",
"_duration"
] | https://github.com/Abjad/abjad/blob/d0646dfbe83db3dc5ab268f76a0950712b87b7fd/abjad/rhythmtrees.py#L178-L193 | |
stoq/stoq | c26991644d1affcf96bc2e0a0434796cabdf8448 | plugins/ecf/ecfui.py | python | ECFUI._get_client_document | (self, sale) | Returns a Settable with two attributes: document, a string with
the client cpf or cnpj and document_type, being one of
(FiscalSaleHistory.TYPE_CPF, FiscalSaleHistory.TYPE_CNPJ ) | Returns a Settable with two attributes: document, a string with
the client cpf or cnpj and document_type, being one of
(FiscalSaleHistory.TYPE_CPF, FiscalSaleHistory.TYPE_CNPJ ) | [
"Returns",
"a",
"Settable",
"with",
"two",
"attributes",
":",
"document",
"a",
"string",
"with",
"the",
"client",
"cpf",
"or",
"cnpj",
"and",
"document_type",
"being",
"one",
"of",
"(",
"FiscalSaleHistory",
".",
"TYPE_CPF",
"FiscalSaleHistory",
".",
"TYPE_CNPJ",... | def _get_client_document(self, sale):
"""Returns a Settable with two attributes: document, a string with
the client cpf or cnpj and document_type, being one of
(FiscalSaleHistory.TYPE_CPF, FiscalSaleHistory.TYPE_CNPJ )
"""
client_role = sale.get_client_role()
if isinstance(client_role, Individual):
document_type = FiscalSaleHistory.TYPE_CPF
document = client_role.cpf
elif isinstance(client_role, Company):
document_type = FiscalSaleHistory.TYPE_CNPJ
document = client_role.cnpj
else:
return
if document:
return Settable(document_type=document_type,
document=document) | [
"def",
"_get_client_document",
"(",
"self",
",",
"sale",
")",
":",
"client_role",
"=",
"sale",
".",
"get_client_role",
"(",
")",
"if",
"isinstance",
"(",
"client_role",
",",
"Individual",
")",
":",
"document_type",
"=",
"FiscalSaleHistory",
".",
"TYPE_CPF",
"d... | https://github.com/stoq/stoq/blob/c26991644d1affcf96bc2e0a0434796cabdf8448/plugins/ecf/ecfui.py#L570-L587 | ||
kerlomz/captcha_trainer | 72b0cd02c66a9b44073820098155b3278c8bde61 | app.py | python | Wizard.project_path | (self) | return project_path | [] | def project_path(self):
if not self.current_project:
return None
project_path = "{}/{}".format(self.project_root_path, self.current_project)
if not os.path.exists(project_path):
os.makedirs(project_path)
return project_path | [
"def",
"project_path",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"current_project",
":",
"return",
"None",
"project_path",
"=",
"\"{}/{}\"",
".",
"format",
"(",
"self",
".",
"project_root_path",
",",
"self",
".",
"current_project",
")",
"if",
"not",
... | https://github.com/kerlomz/captcha_trainer/blob/72b0cd02c66a9b44073820098155b3278c8bde61/app.py#L895-L901 | |||
scrapinghub/spidermon | f2b21e45e70796f583bbb97f39b823c31d242b17 | spidermon/results/monitor.py | python | MonitorResult.monitors_passed_results | (self) | return self._step_monitors.items_for_statuses(
settings.MONITOR.STATUSES.SUCCESSFUL
) | [] | def monitors_passed_results(self):
return self._step_monitors.items_for_statuses(
settings.MONITOR.STATUSES.SUCCESSFUL
) | [
"def",
"monitors_passed_results",
"(",
"self",
")",
":",
"return",
"self",
".",
"_step_monitors",
".",
"items_for_statuses",
"(",
"settings",
".",
"MONITOR",
".",
"STATUSES",
".",
"SUCCESSFUL",
")"
] | https://github.com/scrapinghub/spidermon/blob/f2b21e45e70796f583bbb97f39b823c31d242b17/spidermon/results/monitor.py#L43-L46 | |||
mlrun/mlrun | 4c120719d64327a34b7ee1ab08fb5e01b258b00a | mlrun/feature_store/api.py | python | ingest | (
featureset: Union[FeatureSet, str] = None,
source=None,
targets: List[DataTargetBase] = None,
namespace=None,
return_df: bool = True,
infer_options: InferOptions = InferOptions.default(),
run_config: RunConfig = None,
mlrun_context=None,
spark_context=None,
overwrite=None,
) | return df | Read local DataFrame, file, URL, or source into the feature store
Ingest reads from the source, run the graph transformations, infers metadata and stats
and writes the results to the default of specified targets
when targets are not specified data is stored in the configured default targets
(will usually be NoSQL for real-time and Parquet for offline).
the `run_config` parameter allow specifying the function and job configuration,
see: :py:class:`~mlrun.feature_store.RunConfig`
example::
stocks_set = FeatureSet("stocks", entities=[Entity("ticker")])
stocks = pd.read_csv("stocks.csv")
df = ingest(stocks_set, stocks, infer_options=fstore.InferOptions.default())
# for running as remote job
config = RunConfig(image='mlrun/mlrun')
df = ingest(stocks_set, stocks, run_config=config)
# specify source and targets
source = CSVSource("mycsv", path="measurements.csv")
targets = [CSVTarget("mycsv", path="./mycsv.csv")]
ingest(measurements, source, targets)
:param featureset: feature set object or featureset.uri. (uri must be of a feature set that is in the DB,
call `.save()` if it's not)
:param source: source dataframe or other sources (e.g. parquet source see:
:py:class:`~mlrun.datastore.ParquetSource` and other classes in mlrun.datastore with suffix
Source)
:param targets: optional list of data target objects
:param namespace: namespace or module containing graph classes
:param return_df: indicate if to return a dataframe with the graph results
:param infer_options: schema and stats infer options
:param run_config: function and/or run configuration for remote jobs,
see :py:class:`~mlrun.feature_store.RunConfig`
:param mlrun_context: mlrun context (when running as a job), for internal use !
:param spark_context: local spark session for spark ingestion, example for creating the spark context:
`spark = SparkSession.builder.appName("Spark function").getOrCreate()`
For remote spark ingestion, this should contain the remote spark service name
:param overwrite: delete the targets' data prior to ingestion
(default: True for non scheduled ingest - deletes the targets that are about to be ingested.
False for scheduled ingest - does not delete the target) | Read local DataFrame, file, URL, or source into the feature store
Ingest reads from the source, run the graph transformations, infers metadata and stats
and writes the results to the default of specified targets | [
"Read",
"local",
"DataFrame",
"file",
"URL",
"or",
"source",
"into",
"the",
"feature",
"store",
"Ingest",
"reads",
"from",
"the",
"source",
"run",
"the",
"graph",
"transformations",
"infers",
"metadata",
"and",
"stats",
"and",
"writes",
"the",
"results",
"to",... | def ingest(
featureset: Union[FeatureSet, str] = None,
source=None,
targets: List[DataTargetBase] = None,
namespace=None,
return_df: bool = True,
infer_options: InferOptions = InferOptions.default(),
run_config: RunConfig = None,
mlrun_context=None,
spark_context=None,
overwrite=None,
) -> pd.DataFrame:
"""Read local DataFrame, file, URL, or source into the feature store
Ingest reads from the source, run the graph transformations, infers metadata and stats
and writes the results to the default of specified targets
when targets are not specified data is stored in the configured default targets
(will usually be NoSQL for real-time and Parquet for offline).
the `run_config` parameter allow specifying the function and job configuration,
see: :py:class:`~mlrun.feature_store.RunConfig`
example::
stocks_set = FeatureSet("stocks", entities=[Entity("ticker")])
stocks = pd.read_csv("stocks.csv")
df = ingest(stocks_set, stocks, infer_options=fstore.InferOptions.default())
# for running as remote job
config = RunConfig(image='mlrun/mlrun')
df = ingest(stocks_set, stocks, run_config=config)
# specify source and targets
source = CSVSource("mycsv", path="measurements.csv")
targets = [CSVTarget("mycsv", path="./mycsv.csv")]
ingest(measurements, source, targets)
:param featureset: feature set object or featureset.uri. (uri must be of a feature set that is in the DB,
call `.save()` if it's not)
:param source: source dataframe or other sources (e.g. parquet source see:
:py:class:`~mlrun.datastore.ParquetSource` and other classes in mlrun.datastore with suffix
Source)
:param targets: optional list of data target objects
:param namespace: namespace or module containing graph classes
:param return_df: indicate if to return a dataframe with the graph results
:param infer_options: schema and stats infer options
:param run_config: function and/or run configuration for remote jobs,
see :py:class:`~mlrun.feature_store.RunConfig`
:param mlrun_context: mlrun context (when running as a job), for internal use !
:param spark_context: local spark session for spark ingestion, example for creating the spark context:
`spark = SparkSession.builder.appName("Spark function").getOrCreate()`
For remote spark ingestion, this should contain the remote spark service name
:param overwrite: delete the targets' data prior to ingestion
(default: True for non scheduled ingest - deletes the targets that are about to be ingested.
False for scheduled ingest - does not delete the target)
"""
if featureset:
if isinstance(featureset, str):
# need to strip store prefix from the uri
_, stripped_name = parse_store_uri(featureset)
try:
featureset = get_feature_set_by_uri(stripped_name)
except RunDBError as exc:
# TODO: this handling is needed because the generic httpdb error handling doesn't raise the correct
# error class and doesn't propagate the correct message, until it solved we're manually handling this
# case to give better user experience, remove this when the error handling is fixed.
raise mlrun.errors.MLRunInvalidArgumentError(
f"{exc}. Make sure the feature set is saved in DB (call feature_set.save())"
)
# feature-set spec always has a source property that is not None. It may be default-constructed, in which
# case the path will be 'None'. That's why we need a special check
if source is None and featureset.has_valid_source():
source = featureset.spec.source
if not mlrun_context and (not featureset or source is None):
raise mlrun.errors.MLRunInvalidArgumentError(
"feature set and source must be specified"
)
if run_config:
if isinstance(source, pd.DataFrame):
raise mlrun.errors.MLRunInvalidArgumentError(
"DataFrame source is illegal in with RunConfig"
)
# remote job execution
verify_feature_set_permissions(
featureset, mlrun.api.schemas.AuthorizationAction.update
)
run_config = run_config.copy() if run_config else RunConfig()
source, run_config.parameters = set_task_params(
featureset, source, targets, run_config.parameters, infer_options, overwrite
)
name = f"{featureset.metadata.name}_ingest"
return run_ingestion_job(
name, featureset, run_config, source.schedule, spark_context
)
if mlrun_context:
# extract ingestion parameters from mlrun context
if isinstance(source, pd.DataFrame):
raise mlrun.errors.MLRunInvalidArgumentError(
"DataFrame source is illegal when running ingest remotely"
)
if featureset or source is not None:
raise mlrun.errors.MLRunInvalidArgumentError(
"cannot specify mlrun_context with feature set or source"
)
(
featureset,
source,
targets,
infer_options,
overwrite,
) = context_to_ingestion_params(mlrun_context)
verify_feature_set_permissions(
featureset, mlrun.api.schemas.AuthorizationAction.update
)
if not source:
raise mlrun.errors.MLRunInvalidArgumentError(
"data source was not specified"
)
filter_time_string = ""
if source.schedule:
featureset.reload(update_spec=False)
min_time = datetime.max
for target in featureset.status.targets:
if target.last_written:
cur_last_written = datetime.fromisoformat(target.last_written)
if cur_last_written < min_time:
min_time = cur_last_written
if min_time != datetime.max:
source.start_time = min_time
time_zone = min_time.tzinfo
source.end_time = datetime.now(tz=time_zone)
filter_time_string = (
f"Source.start_time for the job is{str(source.start_time)}. "
f"Source.end_time is {str(source.end_time)}"
)
mlrun_context.logger.info(
f"starting ingestion task to {featureset.uri}.{filter_time_string}"
)
return_df = False
namespace = namespace or get_caller_globals()
purge_targets = targets or featureset.spec.targets or get_default_targets()
if overwrite is None:
if isinstance(source, BaseSourceDriver) and source.schedule:
overwrite = False
else:
overwrite = True
if overwrite:
validate_target_list(targets=purge_targets)
purge_target_names = [
t if isinstance(t, str) else t.name for t in purge_targets
]
featureset.purge_targets(target_names=purge_target_names, silent=True)
else:
for target in purge_targets:
if not kind_to_driver[target.kind].support_append:
raise mlrun.errors.MLRunInvalidArgumentError(
f"{target.kind} target does not support overwrite=False ingestion"
)
if hasattr(target, "is_single_file") and target.is_single_file():
raise mlrun.errors.MLRunInvalidArgumentError(
"overwrite=False isn't supported in single files. Please use folder path."
)
if spark_context and featureset.spec.engine != "spark":
raise mlrun.errors.MLRunInvalidArgumentError(
"featureset.spec.engine must be set to 'spark' to ingest with spark"
)
if featureset.spec.engine == "spark":
if isinstance(source, pd.DataFrame) and run_config is not None:
raise mlrun.errors.MLRunInvalidArgumentError(
"DataFrame source is illegal when ingesting with spark"
)
# use local spark session to ingest
return _ingest_with_spark(
spark_context,
featureset,
source,
targets,
infer_options=infer_options,
mlrun_context=mlrun_context,
namespace=namespace,
overwrite=overwrite,
)
if isinstance(source, str):
source = mlrun.store_manager.object(url=source).as_df()
schema_options = InferOptions.get_common_options(
infer_options, InferOptions.schema()
)
if schema_options:
preview(
featureset, source, options=schema_options, namespace=namespace,
)
infer_stats = InferOptions.get_common_options(
infer_options, InferOptions.all_stats()
)
return_df = return_df or infer_stats != InferOptions.Null
featureset.save()
targets = targets or featureset.spec.targets or get_default_targets()
df = init_featureset_graph(
source, featureset, namespace, targets=targets, return_df=return_df,
)
if not InferOptions.get_common_options(
infer_stats, InferOptions.Index
) and InferOptions.get_common_options(infer_options, InferOptions.Index):
infer_stats += InferOptions.Index
infer_from_static_df(df, featureset, options=infer_stats)
if isinstance(source, DataSource):
for target in featureset.status.targets:
if (
target.last_written == datetime.min
and source.schedule
and source.start_time
):
# datetime.min is a special case that indicated that nothing was written in storey. we need the fix so
# in the next scheduled run, we will have the same start time
target.last_written = source.start_time
_post_ingestion(mlrun_context, featureset, spark_context)
return df | [
"def",
"ingest",
"(",
"featureset",
":",
"Union",
"[",
"FeatureSet",
",",
"str",
"]",
"=",
"None",
",",
"source",
"=",
"None",
",",
"targets",
":",
"List",
"[",
"DataTargetBase",
"]",
"=",
"None",
",",
"namespace",
"=",
"None",
",",
"return_df",
":",
... | https://github.com/mlrun/mlrun/blob/4c120719d64327a34b7ee1ab08fb5e01b258b00a/mlrun/feature_store/api.py#L227-L463 | |
pyproj4/pyproj | 24eade78c52f8bf6717e56fb7c878f7da9892368 | pyproj/crs/crs.py | python | CRS.from_authority | (cls, auth_name: str, code: Union[str, int]) | return cls.from_user_input(_prepare_from_authority(auth_name, code)) | .. versionadded:: 2.2.0
Make a CRS from an authority name and authority code
Parameters
----------
auth_name: str
The name of the authority.
code : int or str
The code used by the authority.
Returns
-------
CRS | .. versionadded:: 2.2.0 | [
"..",
"versionadded",
"::",
"2",
".",
"2",
".",
"0"
] | def from_authority(cls, auth_name: str, code: Union[str, int]) -> "CRS":
"""
.. versionadded:: 2.2.0
Make a CRS from an authority name and authority code
Parameters
----------
auth_name: str
The name of the authority.
code : int or str
The code used by the authority.
Returns
-------
CRS
"""
return cls.from_user_input(_prepare_from_authority(auth_name, code)) | [
"def",
"from_authority",
"(",
"cls",
",",
"auth_name",
":",
"str",
",",
"code",
":",
"Union",
"[",
"str",
",",
"int",
"]",
")",
"->",
"\"CRS\"",
":",
"return",
"cls",
".",
"from_user_input",
"(",
"_prepare_from_authority",
"(",
"auth_name",
",",
"code",
... | https://github.com/pyproj4/pyproj/blob/24eade78c52f8bf6717e56fb7c878f7da9892368/pyproj/crs/crs.py#L338-L355 | |
buke/GreenOdoo | 3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df | source/openerp/api.py | python | onchange | (*args) | return lambda method: decorate(method, '_onchange', args) | Return a decorator to decorate an onchange method for given fields.
Each argument must be a field name::
@api.onchange('partner_id')
def _onchange_partner(self):
self.message = "Dear %s" % (self.partner_id.name or "")
In the form views where the field appears, the method will be called
when one of the given fields is modified. The method is invoked on a
pseudo-record that contains the values present in the form. Field
assignments on that record are automatically sent back to the client.
The method may return a dictionary for changing field domains and pop up
a warning message, like in the old API::
return {
'domain': {'other_id': [('partner_id', '=', partner_id)]},
'warning': {'title': "Warning", 'message': "What is this?"},
}
.. warning::
``@onchange`` only supports simple field names, dotted names
(fields of relational fields e.g. ``partner_id.tz``) are not
supported and will be ignored | Return a decorator to decorate an onchange method for given fields.
Each argument must be a field name:: | [
"Return",
"a",
"decorator",
"to",
"decorate",
"an",
"onchange",
"method",
"for",
"given",
"fields",
".",
"Each",
"argument",
"must",
"be",
"a",
"field",
"name",
"::"
] | def onchange(*args):
""" Return a decorator to decorate an onchange method for given fields.
Each argument must be a field name::
@api.onchange('partner_id')
def _onchange_partner(self):
self.message = "Dear %s" % (self.partner_id.name or "")
In the form views where the field appears, the method will be called
when one of the given fields is modified. The method is invoked on a
pseudo-record that contains the values present in the form. Field
assignments on that record are automatically sent back to the client.
The method may return a dictionary for changing field domains and pop up
a warning message, like in the old API::
return {
'domain': {'other_id': [('partner_id', '=', partner_id)]},
'warning': {'title': "Warning", 'message': "What is this?"},
}
.. warning::
``@onchange`` only supports simple field names, dotted names
(fields of relational fields e.g. ``partner_id.tz``) are not
supported and will be ignored
"""
return lambda method: decorate(method, '_onchange', args) | [
"def",
"onchange",
"(",
"*",
"args",
")",
":",
"return",
"lambda",
"method",
":",
"decorate",
"(",
"method",
",",
"'_onchange'",
",",
"args",
")"
] | https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/source/openerp/api.py#L168-L196 | |
OpenCobolIDE/OpenCobolIDE | c78d0d335378e5fe0a5e74f53c19b68b55e85388 | open_cobol_ide/extlibs/pyqode/core/widgets/preview.py | python | HtmlPreviewWidget._update_preview | (self) | [] | def _update_preview(self):
try:
# remember cursor/scrollbar position
p = self.textCursor().position()
v = self.verticalScrollBar().value()
# display new html
self.setHtml(self._editor.to_html())
# restore cursor/scrollbar position
c = self.textCursor()
c.setPosition(p)
self.setTextCursor(c)
self.verticalScrollBar().setValue(v)
except (TypeError, AttributeError):
self.setHtml('<center>No preview available...</center>')
self.hide_requested.emit() | [
"def",
"_update_preview",
"(",
"self",
")",
":",
"try",
":",
"# remember cursor/scrollbar position",
"p",
"=",
"self",
".",
"textCursor",
"(",
")",
".",
"position",
"(",
")",
"v",
"=",
"self",
".",
"verticalScrollBar",
"(",
")",
".",
"value",
"(",
")",
"... | https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/pyqode/core/widgets/preview.py#L42-L58 | ||||
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/SQLAlchemy-1.3.17/examples/sharding/attribute_shard.py | python | shard_chooser | (mapper, instance, clause=None) | shard chooser.
looks at the given instance and returns a shard id
note that we need to define conditions for
the WeatherLocation class, as well as our secondary Report class which will
point back to its WeatherLocation via its 'location' attribute. | shard chooser. | [
"shard",
"chooser",
"."
] | def shard_chooser(mapper, instance, clause=None):
"""shard chooser.
looks at the given instance and returns a shard id
note that we need to define conditions for
the WeatherLocation class, as well as our secondary Report class which will
point back to its WeatherLocation via its 'location' attribute.
"""
if isinstance(instance, WeatherLocation):
return shard_lookup[instance.continent]
else:
return shard_chooser(mapper, instance.location) | [
"def",
"shard_chooser",
"(",
"mapper",
",",
"instance",
",",
"clause",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"instance",
",",
"WeatherLocation",
")",
":",
"return",
"shard_lookup",
"[",
"instance",
".",
"continent",
"]",
"else",
":",
"return",
"s... | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/SQLAlchemy-1.3.17/examples/sharding/attribute_shard.py#L124-L136 | ||
googledatalab/pydatalab | 1c86e26a0d24e3bc8097895ddeab4d0607be4c40 | google/datalab/contrib/mlworkbench/commands/_ml.py | python | _batch_predict | (args, cell) | [] | def _batch_predict(args, cell):
if args['cloud_config'] and not args['cloud']:
raise ValueError('"cloud_config" is provided but no "--cloud". '
'Do you want local run or cloud run?')
if args['cloud']:
job_request = {
'data_format': 'TEXT',
'input_paths': file_io.get_matching_files(args['data']['csv']),
'output_path': args['output'],
}
if args['model'].startswith('gs://'):
job_request['uri'] = args['model']
else:
parts = args['model'].split('.')
if len(parts) != 2:
raise ValueError('Invalid model name for cloud prediction. Use "model.version".')
version_name = ('projects/%s/models/%s/versions/%s' %
(Context.default().project_id, parts[0], parts[1]))
job_request['version_name'] = version_name
cloud_config = args['cloud_config'] or {}
job_id = cloud_config.pop('job_id', None)
job_request.update(cloud_config)
job = datalab_ml.Job.submit_batch_prediction(job_request, job_id)
_show_job_link(job)
else:
print('local prediction...')
_local_predict.local_batch_predict(args['model'],
args['data']['csv'],
args['output'],
args['format'],
args['batch_size'])
print('done.') | [
"def",
"_batch_predict",
"(",
"args",
",",
"cell",
")",
":",
"if",
"args",
"[",
"'cloud_config'",
"]",
"and",
"not",
"args",
"[",
"'cloud'",
"]",
":",
"raise",
"ValueError",
"(",
"'\"cloud_config\" is provided but no \"--cloud\". '",
"'Do you want local run or cloud r... | https://github.com/googledatalab/pydatalab/blob/1c86e26a0d24e3bc8097895ddeab4d0607be4c40/google/datalab/contrib/mlworkbench/commands/_ml.py#L833-L867 | ||||
maas/maas | db2f89970c640758a51247c59bf1ec6f60cf4ab5 | src/maasserver/management/commands/apikey.py | python | Command._print_token | (self, token) | Write `token` to stdout in the standard format (with names if
--with-names option is enabled) | Write `token` to stdout in the standard format (with names if
--with-names option is enabled) | [
"Write",
"token",
"to",
"stdout",
"in",
"the",
"standard",
"format",
"(",
"with",
"names",
"if",
"--",
"with",
"-",
"names",
"option",
"is",
"enabled",
")"
] | def _print_token(self, token):
"""Write `token` to stdout in the standard format (with names if
--with-names option is enabled)"""
if self.display_names:
self.stdout.write(
"%s %s"
% (
convert_tuple_to_string(get_creds_tuple(token)),
token.consumer.name,
)
)
else:
self.stdout.write(convert_tuple_to_string(get_creds_tuple(token)))
# In Django 1.5+, self.stdout.write() adds a newline character at
# the end of the message.
if django.VERSION < (1, 5):
self.stdout.write("\n") | [
"def",
"_print_token",
"(",
"self",
",",
"token",
")",
":",
"if",
"self",
".",
"display_names",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"\"%s %s\"",
"%",
"(",
"convert_tuple_to_string",
"(",
"get_creds_tuple",
"(",
"token",
")",
")",
",",
"token",
... | https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/maasserver/management/commands/apikey.py#L52-L68 | ||
UniShared/videonotes | 803cdd97b90823fb17f50dd55999aa7d1fec6c3a | lib/gflags.py | python | FlagValues.FindModuleIdDefiningFlag | (self, flagname, default=None) | return default | Return the ID of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default. | Return the ID of the module defining this flag, or default. | [
"Return",
"the",
"ID",
"of",
"the",
"module",
"defining",
"this",
"flag",
"or",
"default",
"."
] | def FindModuleIdDefiningFlag(self, flagname, default=None):
"""Return the ID of the module defining this flag, or default.
Args:
flagname: Name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we return default.
"""
for module_id, flags in self.FlagsByModuleIdDict().iteritems():
for flag in flags:
if flag.name == flagname or flag.short_name == flagname:
return module_id
return default | [
"def",
"FindModuleIdDefiningFlag",
"(",
"self",
",",
"flagname",
",",
"default",
"=",
"None",
")",
":",
"for",
"module_id",
",",
"flags",
"in",
"self",
".",
"FlagsByModuleIdDict",
"(",
")",
".",
"iteritems",
"(",
")",
":",
"for",
"flag",
"in",
"flags",
"... | https://github.com/UniShared/videonotes/blob/803cdd97b90823fb17f50dd55999aa7d1fec6c3a/lib/gflags.py#L973-L990 | |
misterch0c/shadowbroker | e3a069bea47a2c1009697941ac214adc6f90aa8d | windows/Resources/Python/Core/Lib/lib-tk/Tkinter.py | python | Wm.wm_focusmodel | (self, model=None) | return self.tk.call('wm', 'focusmodel', self._w, model) | Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None. | Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None. | [
"Set",
"focus",
"model",
"to",
"MODEL",
".",
"active",
"means",
"that",
"this",
"widget",
"will",
"claim",
"the",
"focus",
"itself",
"passive",
"means",
"that",
"the",
"window",
"manager",
"shall",
"give",
"the",
"focus",
".",
"Return",
"current",
"focus",
... | def wm_focusmodel(self, model=None):
"""Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None."""
return self.tk.call('wm', 'focusmodel', self._w, model) | [
"def",
"wm_focusmodel",
"(",
"self",
",",
"model",
"=",
"None",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"'wm'",
",",
"'focusmodel'",
",",
"self",
".",
"_w",
",",
"model",
")"
] | https://github.com/misterch0c/shadowbroker/blob/e3a069bea47a2c1009697941ac214adc6f90aa8d/windows/Resources/Python/Core/Lib/lib-tk/Tkinter.py#L1672-L1676 | |
apache/tvm | 6eb4ed813ebcdcd9558f0906a1870db8302ff1e0 | python/tvm/relay/frontend/darknet.py | python | _darknet_avgpooling | (inputs, params, attrs, prefix) | return get_relay_op("avg_pool2d")(*inputs, **new_attrs) | Process the average pool 2d operation. | Process the average pool 2d operation. | [
"Process",
"the",
"average",
"pool",
"2d",
"operation",
"."
] | def _darknet_avgpooling(inputs, params, attrs, prefix):
"""Process the average pool 2d operation."""
new_attrs = {}
kernel = attrs.get("kernel")
strides = attrs.get("stride", 1)
pads = attrs.get("pad", 0)
new_attrs["pool_size"] = (kernel, kernel)
new_attrs["strides"] = (strides, strides)
new_attrs["padding"] = (pads, pads)
return get_relay_op("avg_pool2d")(*inputs, **new_attrs) | [
"def",
"_darknet_avgpooling",
"(",
"inputs",
",",
"params",
",",
"attrs",
",",
"prefix",
")",
":",
"new_attrs",
"=",
"{",
"}",
"kernel",
"=",
"attrs",
".",
"get",
"(",
"\"kernel\"",
")",
"strides",
"=",
"attrs",
".",
"get",
"(",
"\"stride\"",
",",
"1",... | https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/relay/frontend/darknet.py#L76-L86 | |
national-voter-file/national-voter-file | f8bae42418c9307150d10c9e71174defaefa4e60 | src/python/national_voter_file/us_states/de/transformer.py | python | StateTransformer.extract_party | (self, input_dict) | return {'PARTY' : self.de_party_map.get(input_dict['PARTY'], input_dict['PARTY'])} | Inputs:
input_dict: names of columns and corresponding values
Outputs:
Dictionary with following keys
'PARTY' | Inputs:
input_dict: names of columns and corresponding values
Outputs:
Dictionary with following keys
'PARTY' | [
"Inputs",
":",
"input_dict",
":",
"names",
"of",
"columns",
"and",
"corresponding",
"values",
"Outputs",
":",
"Dictionary",
"with",
"following",
"keys",
"PARTY"
] | def extract_party(self, input_dict):
"""
Inputs:
input_dict: names of columns and corresponding values
Outputs:
Dictionary with following keys
'PARTY'
"""
return {'PARTY' : self.de_party_map.get(input_dict['PARTY'], input_dict['PARTY'])} | [
"def",
"extract_party",
"(",
"self",
",",
"input_dict",
")",
":",
"return",
"{",
"'PARTY'",
":",
"self",
".",
"de_party_map",
".",
"get",
"(",
"input_dict",
"[",
"'PARTY'",
"]",
",",
"input_dict",
"[",
"'PARTY'",
"]",
")",
"}"
] | https://github.com/national-voter-file/national-voter-file/blob/f8bae42418c9307150d10c9e71174defaefa4e60/src/python/national_voter_file/us_states/de/transformer.py#L267-L276 | |
numenta/nupic | b9ebedaf54f49a33de22d8d44dff7c765cdb5548 | src/nupic/algorithms/backtracking_tm.py | python | BacktrackingTM.printConfidence | (self, aState, maxCols = 20) | Print a floating point array that is the same shape as activeState.
:param aState: TODO: document
:param maxCols: TODO: document | Print a floating point array that is the same shape as activeState. | [
"Print",
"a",
"floating",
"point",
"array",
"that",
"is",
"the",
"same",
"shape",
"as",
"activeState",
"."
def printConfidence(self, aState, maxCols = 20):
  """
  Print a floating point array that is the same shape as activeState.

  NOTE: Python 2 code (``print`` statement, ``xrange``) -- kept as-is.

  :param aState: 2-D float array indexed as ``aState[column, cell]``; the
                 code reads ``self.numberOfCols`` columns and
                 ``self.cellsPerColumn`` cells per column.
  :param maxCols: maximum number of columns printed per row; output is
                  truncated to the first ``maxCols`` columns.
  """
  def formatFPRow(var, i):
    # Build one output row: the value of cell ``i`` in each printed column,
    # formatted '%5.3f', with an extra space inserted every 10 columns.
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c, i]
    s += ' '
    return s

  # One printed line per cell index, across all (truncated) columns.
  for i in xrange(self.cellsPerColumn):
    print formatFPRow(aState, i)
"def",
"printConfidence",
"(",
"self",
",",
"aState",
",",
"maxCols",
"=",
"20",
")",
":",
"def",
"formatFPRow",
"(",
"var",
",",
"i",
")",
":",
"s",
"=",
"''",
"for",
"c",
"in",
"range",
"(",
"min",
"(",
"maxCols",
",",
"self",
".",
"numberOfCols"... | https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/src/nupic/algorithms/backtracking_tm.py#L1018-L1035 | ||
google/trax | d6cae2067dedd0490b78d831033607357e975015 | trax/tf_numpy/numpy_impl/math_ops.py | python | isfinite | (x) | return _scalar(tf.math.is_finite, x, True) | [] | def isfinite(x):
return _scalar(tf.math.is_finite, x, True) | [
"def",
"isfinite",
"(",
"x",
")",
":",
"return",
"_scalar",
"(",
"tf",
".",
"math",
".",
"is_finite",
",",
"x",
",",
"True",
")"
] | https://github.com/google/trax/blob/d6cae2067dedd0490b78d831033607357e975015/trax/tf_numpy/numpy_impl/math_ops.py#L711-L712 | |||
general03/flask-autoindex | 424246242c9f40aeb9ac2c8c63f4d2234024256e | .eggs/Werkzeug-1.0.1-py3.7.egg/werkzeug/utils.py | python | append_slash_redirect | (environ, code=301) | return redirect(new_path, code) | Redirects to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already.
:param environ: the WSGI environment for the request that triggers
the redirect.
:param code: the status code for the redirect. | Redirects to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already. | [
"Redirects",
"to",
"the",
"same",
"URL",
"but",
"with",
"a",
"slash",
"appended",
".",
"The",
"behavior",
"of",
"this",
"function",
"is",
"undefined",
"if",
"the",
"path",
"ends",
"with",
"a",
"slash",
"already",
"."
def append_slash_redirect(environ, code=301):
    """Redirects to the same URL but with a slash appended.  The behavior
    of this function is undefined if the path ends with a slash already.

    :param environ: the WSGI environment for the request that triggers
                    the redirect.
    :param code: the status code for the redirect.
    """
    # Normalize the path to exactly one trailing slash, then re-attach the
    # query string if the request carried one.
    target = environ["PATH_INFO"].strip("/") + "/"
    qs = environ.get("QUERY_STRING")
    if qs:
        target = target + "?" + qs
    return redirect(target, code)
"def",
"append_slash_redirect",
"(",
"environ",
",",
"code",
"=",
"301",
")",
":",
"new_path",
"=",
"environ",
"[",
"\"PATH_INFO\"",
"]",
".",
"strip",
"(",
"\"/\"",
")",
"+",
"\"/\"",
"query_string",
"=",
"environ",
".",
"get",
"(",
"\"QUERY_STRING\"",
")... | https://github.com/general03/flask-autoindex/blob/424246242c9f40aeb9ac2c8c63f4d2234024256e/.eggs/Werkzeug-1.0.1-py3.7.egg/werkzeug/utils.py#L534-L546 | |
JustinhoCHN/SRGAN_Wasserstein | 08cb76028880f95cbeea1353c5bfc5b2b356ae83 | tensorlayer/prepro.py | python | swirl | (x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0, clip=True, preserve_range=False, is_random=False) | return swirled | Swirl an image randomly or non-randomly, see `scikit-image swirl API <http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.swirl>`_
and `example <http://scikit-image.org/docs/dev/auto_examples/plot_swirl.html>`_.
Parameters
-----------
x : numpy array
An image with dimension of [row, col, channel] (default).
center : (row, column) tuple or (2,) ndarray, optional
Center coordinate of transformation.
strength : float, optional
The amount of swirling applied.
radius : float, optional
The extent of the swirl in pixels. The effect dies out rapidly beyond radius.
rotation : float, (degree) optional
Additional rotation applied to the image, usually [0, 360], relates to center.
output_shape : tuple (rows, cols), optional
Shape of the output image generated. By default the shape of the input image is preserved.
order : int, optional
The order of the spline interpolation, default is 1. The order has to be in the range 0-5. See skimage.transform.warp for detail.
mode : {‘constant’, ‘edge’, ‘symmetric’, ‘reflect’, ‘wrap’}, optional
Points outside the boundaries of the input are filled according to the given mode, with ‘constant’ used as the default. Modes match the behaviour of numpy.pad.
cval : float, optional
Used in conjunction with mode ‘constant’, the value outside the image boundaries.
clip : bool, optional
Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.
is_random : boolean, default False
If True, random swirl.
- random center = [(0 ~ x.shape[0]), (0 ~ x.shape[1])]
- random strength = [0, strength]
- random radius = [1e-10, radius]
- random rotation = [-rotation, rotation]
Examples
---------
>>> x --> [row, col, 1] greyscale
>>> x = swirl(x, strength=4, radius=100) | Swirl an image randomly or non-randomly, see `scikit-image swirl API <http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.swirl>`_
and `example <http://scikit-image.org/docs/dev/auto_examples/plot_swirl.html>`_. | [
"Swirl",
"an",
"image",
"randomly",
"or",
"non",
"-",
"randomly",
"see",
"scikit",
"-",
"image",
"swirl",
"API",
"<http",
":",
"//",
"scikit",
"-",
"image",
".",
"org",
"/",
"docs",
"/",
"dev",
"/",
"api",
"/",
"skimage",
".",
"transform",
".",
"html... | def swirl(x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0, clip=True, preserve_range=False, is_random=False):
"""Swirl an image randomly or non-randomly, see `scikit-image swirl API <http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.swirl>`_
and `example <http://scikit-image.org/docs/dev/auto_examples/plot_swirl.html>`_.
Parameters
-----------
x : numpy array
An image with dimension of [row, col, channel] (default).
center : (row, column) tuple or (2,) ndarray, optional
Center coordinate of transformation.
strength : float, optional
The amount of swirling applied.
radius : float, optional
The extent of the swirl in pixels. The effect dies out rapidly beyond radius.
rotation : float, (degree) optional
Additional rotation applied to the image, usually [0, 360], relates to center.
output_shape : tuple (rows, cols), optional
Shape of the output image generated. By default the shape of the input image is preserved.
order : int, optional
The order of the spline interpolation, default is 1. The order has to be in the range 0-5. See skimage.transform.warp for detail.
mode : {‘constant’, ‘edge’, ‘symmetric’, ‘reflect’, ‘wrap’}, optional
Points outside the boundaries of the input are filled according to the given mode, with ‘constant’ used as the default. Modes match the behaviour of numpy.pad.
cval : float, optional
Used in conjunction with mode ‘constant’, the value outside the image boundaries.
clip : bool, optional
Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.
is_random : boolean, default False
If True, random swirl.
- random center = [(0 ~ x.shape[0]), (0 ~ x.shape[1])]
- random strength = [0, strength]
- random radius = [1e-10, radius]
- random rotation = [-rotation, rotation]
Examples
---------
>>> x --> [row, col, 1] greyscale
>>> x = swirl(x, strength=4, radius=100)
"""
assert radius != 0, Exception("Invalid radius value")
rotation = np.pi / 180 * rotation
if is_random:
center_h = int(np.random.uniform(0, x.shape[0]))
center_w = int(np.random.uniform(0, x.shape[1]))
center = (center_h, center_w)
strength = np.random.uniform(0, strength)
radius = np.random.uniform(1e-10, radius)
rotation = np.random.uniform(-rotation, rotation)
max_v = np.max(x)
if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required.
x = x / max_v
swirled = skimage.transform.swirl(x, center=center, strength=strength, radius=radius, rotation=rotation,
output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range)
if max_v > 1:
swirled = swirled * max_v
return swirled | [
"def",
"swirl",
"(",
"x",
",",
"center",
"=",
"None",
",",
"strength",
"=",
"1",
",",
"radius",
"=",
"100",
",",
"rotation",
"=",
"0",
",",
"output_shape",
"=",
"None",
",",
"order",
"=",
"1",
",",
"mode",
"=",
"'constant'",
",",
"cval",
"=",
"0"... | https://github.com/JustinhoCHN/SRGAN_Wasserstein/blob/08cb76028880f95cbeea1353c5bfc5b2b356ae83/tensorlayer/prepro.py#L480-L537 | |
cocoakekeyu/cancan | c6722026cb3ec9453c34c6453f4fb67cc8b625f3 | cancan/ability.py | python | Ability.expand_actions | (self, actions) | return r | Accepts an array of actions and returns an array of actions which match | Accepts an array of actions and returns an array of actions which match | [
"Accepts",
"an",
"array",
"of",
"actions",
"and",
"returns",
"an",
"array",
"of",
"actions",
"which",
"match"
def expand_actions(self, actions):
    """
    Accepts an array of actions and returns an array of actions which match
    """
    # Each action is kept, immediately followed by any aliases registered
    # for it in ``self.aliased_actions``.
    expanded = []
    for name in actions:
        expanded.append(name)
        expanded.extend(self.aliased_actions.get(name, ()))
    return expanded
"def",
"expand_actions",
"(",
"self",
",",
"actions",
")",
":",
"r",
"=",
"[",
"]",
"for",
"action",
"in",
"actions",
":",
"r",
".",
"append",
"(",
"action",
")",
"if",
"action",
"in",
"self",
".",
"aliased_actions",
":",
"r",
".",
"extend",
"(",
"... | https://github.com/cocoakekeyu/cancan/blob/c6722026cb3ec9453c34c6453f4fb67cc8b625f3/cancan/ability.py#L81-L90 | |
ni/nidaqmx-python | 62fc6b48cbbb330fe1bcc9aedadc86610a1269b6 | nidaqmx/_task_modules/channels/co_channel.py | python | COChannel.co_data_xfer_mech | (self) | [] | def co_data_xfer_mech(self):
cfunc = lib_importer.windll.DAQmxResetCODataXferMech
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code) | [
"def",
"co_data_xfer_mech",
"(",
"self",
")",
":",
"cfunc",
"=",
"lib_importer",
".",
"windll",
".",
"DAQmxResetCODataXferMech",
"if",
"cfunc",
".",
"argtypes",
"is",
"None",
":",
"with",
"cfunc",
".",
"arglock",
":",
"if",
"cfunc",
".",
"argtypes",
"is",
... | https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/channels/co_channel.py#L676-L686 | ||||
getsentry/snuba | 6f92898b37c89d5d41a1894b313726d85ede0170 | snuba/clickhouse/translators/snuba/mapping.py | python | SnubaClickhouseMappingTranslator.translate_function_strict | (self, exp: FunctionCall) | return f | Unfortunately it is not possible to avoid this assertion.
Though the structure of TranslationMappers guarantees that this
assertion can never fail since it defines the valid translations
and it statically requires a FunctionCallMapper to translate a
FunctionCall.
FunctionCallMapper returns FunctionCall as return type, thus
always satisfying the assertion. | Unfortunately it is not possible to avoid this assertion.
Though the structure of TranslationMappers guarantees that this
assertion can never fail since it defines the valid translations
and it statically requires a FunctionCallMapper to translate a
FunctionCall.
FunctionCallMapper returns FunctionCall as return type, thus
always satisfying the assertion. | [
"Unfortunately",
"it",
"is",
"not",
"possible",
"to",
"avoid",
"this",
"assertion",
".",
"Though",
"the",
"structure",
"of",
"TranslationMappers",
"guarantees",
"that",
"this",
"assertion",
"can",
"never",
"fail",
"since",
"it",
"defines",
"the",
"valid",
"trans... | def translate_function_strict(self, exp: FunctionCall) -> FunctionCall:
"""
Unfortunately it is not possible to avoid this assertion.
Though the structure of TranslationMappers guarantees that this
assertion can never fail since it defines the valid translations
and it statically requires a FunctionCallMapper to translate a
FunctionCall.
FunctionCallMapper returns FunctionCall as return type, thus
always satisfying the assertion.
"""
f = exp.accept(self)
assert isinstance(f, FunctionCall)
return f | [
"def",
"translate_function_strict",
"(",
"self",
",",
"exp",
":",
"FunctionCall",
")",
"->",
"FunctionCall",
":",
"f",
"=",
"exp",
".",
"accept",
"(",
"self",
")",
"assert",
"isinstance",
"(",
"f",
",",
"FunctionCall",
")",
"return",
"f"
] | https://github.com/getsentry/snuba/blob/6f92898b37c89d5d41a1894b313726d85ede0170/snuba/clickhouse/translators/snuba/mapping.py#L179-L191 | |
Gallopsled/pwntools | 1573957cc8b1957399b7cc9bfae0c6f80630d5d4 | pwnlib/tubes/tube.py | python | tube.recvlines | (self, numlines=2**20, keepends=False, timeout=default) | return lines | r"""recvlines(numlines, keepends=False, timeout=default) -> list of bytes objects
Receive up to ``numlines`` lines.
A "line" is any sequence of bytes terminated by the byte sequence
set by :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
numlines(int): Maximum number of lines to receive
keepends(bool): Keep newlines at the end of each line (:const:`False`).
timeout(int): Maximum timeout
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: b'\n'
>>> t.recvlines(3)
[b'', b'', b'']
>>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
>>> t.recvlines(3)
[b'Foo', b'Bar', b'Baz']
>>> t.recvlines(3, True)
[b'Foo\n', b'Bar\n', b'Baz\n'] | r"""recvlines(numlines, keepends=False, timeout=default) -> list of bytes objects | [
"r",
"recvlines",
"(",
"numlines",
"keepends",
"=",
"False",
"timeout",
"=",
"default",
")",
"-",
">",
"list",
"of",
"bytes",
"objects"
def recvlines(self, numlines=2**20, keepends=False, timeout=default):
    r"""recvlines(numlines, keepends=False, timeout=default) -> list of bytes objects

    Receive up to ``numlines`` lines.

    A "line" is any sequence of bytes terminated by the byte sequence
    set by :attr:`newline`, which defaults to ``'\n'``.

    If the request is not satisfied before ``timeout`` seconds pass,
    all data is buffered and an empty string (``''``) is returned.

    Arguments:
        numlines(int): Maximum number of lines to receive
        keepends(bool): Keep newlines at the end of each line (:const:`False`).
        timeout(int): Maximum timeout

    Raises:
        exceptions.EOFError: The connection closed before the request could be satisfied

    Returns:
        A string containing bytes received from the socket,
        or ``''`` if a timeout occurred while waiting.

    Examples:

        >>> t = tube()
        >>> t.recv_raw = lambda n: b'\n'
        >>> t.recvlines(3)
        [b'', b'', b'']
        >>> t.recv_raw = lambda n: b'Foo\nBar\nBaz\n'
        >>> t.recvlines(3)
        [b'Foo', b'Bar', b'Baz']
        >>> t.recvlines(3, True)
        [b'Foo\n', b'Bar\n', b'Baz\n']
    """
    lines = []
    # The shared countdown makes ``timeout`` apply to the whole batch, not
    # to each individual recvline call.
    with self.countdown(timeout):
        for _ in range(numlines):
            try:
                # We must set 'keepends' to True here so that we can
                # restore the original, unmodified data to the buffer
                # in the event of a timeout.
                res = self.recvline(keepends=True, timeout=timeout)
            except Exception:
                # Push everything already received back onto the buffer so
                # the stream is unmodified for the caller after a failure.
                self.unrecv(b''.join(lines))
                raise
            if res:
                lines.append(res)
            else:
                # Empty result: recvline timed out -- stop with what we have.
                break

    if not keepends:
        # Only strip trailing newlines on the way out; the buffered copies
        # above had to stay intact for unrecv().
        lines = [line.rstrip(self.newline) for line in lines]
    return lines
"def",
"recvlines",
"(",
"self",
",",
"numlines",
"=",
"2",
"**",
"20",
",",
"keepends",
"=",
"False",
",",
"timeout",
"=",
"default",
")",
":",
"lines",
"=",
"[",
"]",
"with",
"self",
".",
"countdown",
"(",
"timeout",
")",
":",
"for",
"_",
"in",
... | https://github.com/Gallopsled/pwntools/blob/1573957cc8b1957399b7cc9bfae0c6f80630d5d4/pwnlib/tubes/tube.py#L354-L409 | |
OpenCobolIDE/OpenCobolIDE | c78d0d335378e5fe0a5e74f53c19b68b55e85388 | open_cobol_ide/extlibs/pyqode/cobol/widgets/pic_offsets.py | python | PicOffsetsTable._update | (self, infos) | [] | def _update(self, infos):
self.clearContents()
self.setRowCount(len(infos))
# process each info in a separate row
for i, info in enumerate(infos):
self.setItem(
i, 0, QtWidgets.QTableWidgetItem("%s" % info.level))
self.setItem(
i, 1, QtWidgets.QTableWidgetItem(info.name))
self.setItem(
i, 2, QtWidgets.QTableWidgetItem("%s" % info.offset))
self.setItem(
i, 3, QtWidgets.QTableWidgetItem(info.pic))
self.setSortingEnabled(False)
self.show_requested.emit() | [
"def",
"_update",
"(",
"self",
",",
"infos",
")",
":",
"self",
".",
"clearContents",
"(",
")",
"self",
".",
"setRowCount",
"(",
"len",
"(",
"infos",
")",
")",
"# process each info in a separate row",
"for",
"i",
",",
"info",
"in",
"enumerate",
"(",
"infos"... | https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/pyqode/cobol/widgets/pic_offsets.py#L53-L69 | ||||
IdentityPython/pysaml2 | 6badb32d212257bd83ffcc816f9b625f68281b47 | src/saml2/authn_context/timesync.py | python | ActivationLimitType_.__init__ | (self,
activation_limit_duration=None,
activation_limit_usages=None,
activation_limit_session=None,
text=None,
extension_elements=None,
extension_attributes=None,
def __init__(self,
             activation_limit_duration=None,
             activation_limit_usages=None,
             activation_limit_session=None,
             text=None,
             extension_elements=None,
             extension_attributes=None,
             ):
    """Initialize an ActivationLimitType element (generated schema binding).

    :param activation_limit_duration: presumably the ActivationLimitDuration
        child element -- stored as-is on the instance.
    :param activation_limit_usages: presumably the ActivationLimitUsages
        child element -- stored as-is on the instance.
    :param activation_limit_session: presumably the ActivationLimitSession
        child element -- stored as-is on the instance.
    :param text: element text content, forwarded to SamlBase.
    :param extension_elements: extension elements, forwarded to SamlBase.
    :param extension_attributes: extension attributes, forwarded to SamlBase.
    """
    SamlBase.__init__(self,
                      text=text,
                      extension_elements=extension_elements,
                      extension_attributes=extension_attributes,
                      )
    self.activation_limit_duration = activation_limit_duration
    self.activation_limit_usages = activation_limit_usages
    self.activation_limit_session = activation_limit_session
"def",
"__init__",
"(",
"self",
",",
"activation_limit_duration",
"=",
"None",
",",
"activation_limit_usages",
"=",
"None",
",",
"activation_limit_session",
"=",
"None",
",",
"text",
"=",
"None",
",",
"extension_elements",
"=",
"None",
",",
"extension_attributes",
... | https://github.com/IdentityPython/pysaml2/blob/6badb32d212257bd83ffcc816f9b625f68281b47/src/saml2/authn_context/timesync.py#L925-L940 | ||||
JiYou/openstack | 8607dd488bde0905044b303eb6e52bdea6806923 | packages/source/nova/nova/virt/powervm/operator.py | python | BaseOperator.create_lpar | (self, lpar) | Receives a LPAR data object and creates a LPAR instance.
:param lpar: LPAR object | Receives a LPAR data object and creates a LPAR instance. | [
"Receives",
"a",
"LPAR",
"data",
"object",
"and",
"creates",
"a",
"LPAR",
"instance",
"."
def create_lpar(self, lpar):
    """Receives a LPAR data object and creates a LPAR instance.

    :param lpar: LPAR object
    """
    # Serialize the LPAR definition and issue the HMC/VIOS mksyscfg
    # command that actually creates the partition.
    lpar_config = lpar.to_string()
    command = self.command.mksyscfg('-r lpar -i "%s"' % lpar_config)
    self.run_vios_command(command)
"def",
"create_lpar",
"(",
"self",
",",
"lpar",
")",
":",
"conf_data",
"=",
"lpar",
".",
"to_string",
"(",
")",
"self",
".",
"run_vios_command",
"(",
"self",
".",
"command",
".",
"mksyscfg",
"(",
"'-r lpar -i \"%s\"'",
"%",
"conf_data",
")",
")"
] | https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/nova/nova/virt/powervm/operator.py#L494-L501 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.