nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
nasa/fprime | 595cf3682d8365943d86c1a6fe7c78f0a116acf0 | Autocoders/Python/src/fprime_ac/generators/visitors/InstanceTopologyCppVisitor.py | python | InstanceTopologyCppVisitor.protectedVisit | (self, obj) | Defined to generate protected stuff within a class.
@param args: the instance of the concrete element to operation on. | Defined to generate protected stuff within a class. | [
"Defined",
"to",
"generate",
"protected",
"stuff",
"within",
"a",
"class",
"."
] | def protectedVisit(self, obj):
"""
Defined to generate protected stuff within a class.
@param args: the instance of the concrete element to operation on.
""" | [
"def",
"protectedVisit",
"(",
"self",
",",
"obj",
")",
":"
] | https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/generators/visitors/InstanceTopologyCppVisitor.py#L404-L408 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_misc.py | python | Caret.GetSize | (*args, **kwargs) | return _misc_.Caret_GetSize(*args, **kwargs) | GetSize(self) -> Size | GetSize(self) -> Size | [
"GetSize",
"(",
"self",
")",
"-",
">",
"Size"
] | def GetSize(*args, **kwargs):
"""GetSize(self) -> Size"""
return _misc_.Caret_GetSize(*args, **kwargs) | [
"def",
"GetSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"Caret_GetSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L762-L764 | |
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/core/defchararray.py | python | find | (a, sub, start=0, end=None) | return _vec_string(
a, integer, 'find', [sub, start] + _clean_args(end)) | For each element, return the lowest index in the string where
substring `sub` is found.
Calls `str.find` element-wise.
For each element, return the lowest index in the string where
substring `sub` is found, such that `sub` is contained in the
range [`start`, `end`].
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray or int
Output array of ints. Returns -1 if `sub` is not found.
See also
--------
str.find | For each element, return the lowest index in the string where
substring `sub` is found. | [
"For",
"each",
"element",
"return",
"the",
"lowest",
"index",
"in",
"the",
"string",
"where",
"substring",
"sub",
"is",
"found",
"."
] | def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
Calls `str.find` element-wise.
For each element, return the lowest index in the string where
substring `sub` is found, such that `sub` is contained in the
range [`start`, `end`].
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray or int
Output array of ints. Returns -1 if `sub` is not found.
See also
--------
str.find
"""
return _vec_string(
a, integer, 'find', [sub, start] + _clean_args(end)) | [
"def",
"find",
"(",
"a",
",",
"sub",
",",
"start",
"=",
"0",
",",
"end",
"=",
"None",
")",
":",
"return",
"_vec_string",
"(",
"a",
",",
"integer",
",",
"'find'",
",",
"[",
"sub",
",",
"start",
"]",
"+",
"_clean_args",
"(",
"end",
")",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/core/defchararray.py#L621-L653 | |
makefile/frcnn | 8d9b9ebf8be8315ba2f374d460121b0adf1df29c | scripts/cpp_lint.py | python | _CppLintState.SetFilters | (self, filters) | Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" | Sets the error-message filters. | [
"Sets",
"the",
"error",
"-",
"message",
"filters",
"."
] | def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt) | [
"def",
"SetFilters",
"(",
"self",
",",
"filters",
")",
":",
"# Default filters always have less priority than the flag ones.",
"self",
".",
"filters",
"=",
"_DEFAULT_FILTERS",
"[",
":",
"]",
"for",
"filt",
"in",
"filters",
".",
"split",
"(",
"','",
")",
":",
"cl... | https://github.com/makefile/frcnn/blob/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/scripts/cpp_lint.py#L717-L740 | ||
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/python/turicreate/data_structures/sarray.py | python | SArray.mean | (self) | Mean of all the values in the SArray, or mean image.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or non-Image type.
Returns
-------
out : float | turicreate.Image
Mean of all values in SArray, or image holding per-pixel mean
across the input SArray.
See Also
--------
median | Mean of all the values in the SArray, or mean image. | [
"Mean",
"of",
"all",
"the",
"values",
"in",
"the",
"SArray",
"or",
"mean",
"image",
"."
] | def mean(self):
"""
Mean of all the values in the SArray, or mean image.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or non-Image type.
Returns
-------
out : float | turicreate.Image
Mean of all values in SArray, or image holding per-pixel mean
across the input SArray.
See Also
--------
median
"""
with cython_context():
if self.dtype == _Image:
from .. import extensions
return extensions.generate_mean(self)
else:
return self.__proxy__.mean() | [
"def",
"mean",
"(",
"self",
")",
":",
"with",
"cython_context",
"(",
")",
":",
"if",
"self",
".",
"dtype",
"==",
"_Image",
":",
"from",
".",
".",
"import",
"extensions",
"return",
"extensions",
".",
"generate_mean",
"(",
"self",
")",
"else",
":",
"retu... | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/data_structures/sarray.py#L2276-L2299 | ||
okex/V3-Open-API-SDK | c5abb0db7e2287718e0055e17e57672ce0ec7fd9 | okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/requests/utils.py | python | set_environ | (env_name, value) | Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing | Set the environment variable 'env_name' to 'value' | [
"Set",
"the",
"environment",
"variable",
"env_name",
"to",
"value"
] | def set_environ(env_name, value):
"""Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing"""
value_changed = value is not None
if value_changed:
old_value = os.environ.get(env_name)
os.environ[env_name] = value
try:
yield
finally:
if value_changed:
if old_value is None:
del os.environ[env_name]
else:
os.environ[env_name] = old_value | [
"def",
"set_environ",
"(",
"env_name",
",",
"value",
")",
":",
"value_changed",
"=",
"value",
"is",
"not",
"None",
"if",
"value_changed",
":",
"old_value",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"env_name",
")",
"os",
".",
"environ",
"[",
"env_name"... | https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/requests/utils.py#L673-L691 | ||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/distributions/python/ops/distribution_util.py | python | make_tril_scale | (
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None) | return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name) | Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_tril: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None. | Creates a LinOp representing a lower triangular matrix. | [
"Creates",
"a",
"LinOp",
"representing",
"a",
"lower",
"triangular",
"matrix",
"."
] | def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_tril: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name) | [
"def",
"make_tril_scale",
"(",
"loc",
"=",
"None",
",",
"scale_tril",
"=",
"None",
",",
"scale_diag",
"=",
"None",
",",
"scale_identity_multiplier",
"=",
"None",
",",
"shape_hint",
"=",
"None",
",",
"validate_args",
"=",
"False",
",",
"assert_positive",
"=",
... | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/distributions/python/ops/distribution_util.py#L90-L183 | |
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/operator.py | python | CustomOpProp.infer_type | (self, in_type) | return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states. | infer_type interface. override to create new operators | [
"infer_type",
"interface",
".",
"override",
"to",
"create",
"new",
"operators"
] | def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | [
"def",
"infer_type",
"(",
"self",
",",
"in_type",
")",
":",
"return",
"in_type",
",",
"[",
"in_type",
"[",
"0",
"]",
"]",
"*",
"len",
"(",
"self",
".",
"list_outputs",
"(",
")",
")",
",",
"[",
"in_type",
"[",
"0",
"]",
"]",
"*",
"len",
"(",
"se... | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/operator.py#L521-L542 | |
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | third_party/pymock/mock.py | python | _patch_multiple | (target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs) | return patcher | Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap. | Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches:: | [
"Perform",
"multiple",
"patches",
"in",
"a",
"single",
"call",
".",
"It",
"takes",
"the",
"object",
"to",
"be",
"patched",
"(",
"either",
"as",
"an",
"object",
"or",
"a",
"string",
"to",
"fetch",
"the",
"object",
"by",
"importing",
")",
"and",
"keyword",... | def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) in (unicode, str):
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher | [
"def",
"_patch_multiple",
"(",
"target",
",",
"spec",
"=",
"None",
",",
"create",
"=",
"False",
",",
"spec_set",
"=",
"None",
",",
"autospec",
"=",
"None",
",",
"new_callable",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"type",
"(",
"targe... | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/pymock/mock.py#L1446-L1492 | |
dmlc/nnvm | dab5ce8ab6adbf4edd8bd2fa89f1a99f343b6e38 | python/nnvm/frontend/darknet.py | python | _darknet_conv2d | (inputs, attrs) | return sym, out_name | Process the convolution 2d operation. | Process the convolution 2d operation. | [
"Process",
"the",
"convolution",
"2d",
"operation",
"."
] | def _darknet_conv2d(inputs, attrs):
"""Process the convolution 2d operation."""
kernel = _darknet_parse_tshape(_darknet_required_attr(attrs, 'kernel'))
if len(kernel) != 1:
_darknet_raise_not_supported('non 2d kernel', 'conv2d')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
_darknet_raise_not_supported('layout: ' + layout, 'conv2d')
strides = int(attrs.get('stride', (1, 1)))
pads = int(attrs.get('pad', (0, 0)))
op_name, new_attrs = 'conv2d', {}
new_attrs['channels'] = _darknet_required_attr(attrs, 'num_filter')
new_attrs['kernel_size'] = [kernel[0], kernel[0]]
new_attrs['strides'] = (strides, strides)
new_attrs['padding'] = (pads, pads)
new_attrs['dilation'] = attrs.get('dilate', (1, 1))
new_attrs['groups'] = attrs.get('num_group', 1)
new_attrs['layout'] = layout
if attrs.get('use_batchNorm', False) is True:
new_attrs['use_bias'] = False
else:
new_attrs['use_bias'] = True
out_name = {}
sym = _darknet_get_nnvm_op(op_name)(*inputs, **new_attrs)
out_name[0] = sym.list_output_names()[0].replace('_output', '')
if attrs.get('use_batchNorm', False) is True:
op_name, new_attrs = 'batch_norm', {}
new_attrs['epsilon'] = 0.000001
sym = _darknet_get_nnvm_op(op_name)(*sym, **new_attrs)
out_name[1] = sym.list_output_names()[0].replace('_output', '')
if 'activation' in attrs:
new_attrs = {}
new_attrs['activation'] = attrs['activation']
new_attrs['slope'] = 0.1
sym, _ = _darknet_activations(sym, new_attrs)
return sym, out_name | [
"def",
"_darknet_conv2d",
"(",
"inputs",
",",
"attrs",
")",
":",
"kernel",
"=",
"_darknet_parse_tshape",
"(",
"_darknet_required_attr",
"(",
"attrs",
",",
"'kernel'",
")",
")",
"if",
"len",
"(",
"kernel",
")",
"!=",
"1",
":",
"_darknet_raise_not_supported",
"(... | https://github.com/dmlc/nnvm/blob/dab5ce8ab6adbf4edd8bd2fa89f1a99f343b6e38/python/nnvm/frontend/darknet.py#L132-L169 | |
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/step-by-step-directions-from-a-binary-tree-node-to-another.py | python | Solution2.getDirections | (self, root, startValue, destValue) | return "".join(['U']*len(src) + dst) | :type root: Optional[TreeNode]
:type startValue: int
:type destValue: int
:rtype: str | :type root: Optional[TreeNode]
:type startValue: int
:type destValue: int
:rtype: str | [
":",
"type",
"root",
":",
"Optional",
"[",
"TreeNode",
"]",
":",
"type",
"startValue",
":",
"int",
":",
"type",
"destValue",
":",
"int",
":",
"rtype",
":",
"str"
] | def getDirections(self, root, startValue, destValue):
"""
:type root: Optional[TreeNode]
:type startValue: int
:type destValue: int
:rtype: str
"""
def dfs(node, val, path):
if node.val == val:
return True
if node.left and dfs(node.left, val, path):
path.append('L')
elif node.right and dfs(node.right, val, path):
path.append('R')
return path
src, dst = [], []
dfs(root, startValue, src)
dfs(root, destValue, dst)
while len(src) and len(dst) and src[-1] == dst[-1]:
src.pop()
dst.pop()
dst.reverse()
return "".join(['U']*len(src) + dst) | [
"def",
"getDirections",
"(",
"self",
",",
"root",
",",
"startValue",
",",
"destValue",
")",
":",
"def",
"dfs",
"(",
"node",
",",
"val",
",",
"path",
")",
":",
"if",
"node",
".",
"val",
"==",
"val",
":",
"return",
"True",
"if",
"node",
".",
"left",
... | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/step-by-step-directions-from-a-binary-tree-node-to-another.py#L54-L77 | |
DaFuCoding/MTCNN_Caffe | 09c30c3ff391bd9cb6b249c1910afaf147767ab3 | scripts/cpp_lint.py | python | Error | (filename, linenum, category, confidence, message) | Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message. | Logs the fact we've found a lint error. | [
"Logs",
"the",
"fact",
"we",
"ve",
"found",
"a",
"lint",
"error",
"."
] | def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence)) | [
"def",
"Error",
"(",
"filename",
",",
"linenum",
",",
"category",
",",
"confidence",
",",
"message",
")",
":",
"if",
"_ShouldPrintError",
"(",
"category",
",",
"confidence",
",",
"linenum",
")",
":",
"_cpplint_state",
".",
"IncrementErrorCount",
"(",
"category... | https://github.com/DaFuCoding/MTCNN_Caffe/blob/09c30c3ff391bd9cb6b249c1910afaf147767ab3/scripts/cpp_lint.py#L988-L1020 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/aui.py | python | AuiMDIChildFrame.Iconize | (*args, **kwargs) | return _aui.AuiMDIChildFrame_Iconize(*args, **kwargs) | Iconize(self, bool iconize=True) | Iconize(self, bool iconize=True) | [
"Iconize",
"(",
"self",
"bool",
"iconize",
"=",
"True",
")"
] | def Iconize(*args, **kwargs):
"""Iconize(self, bool iconize=True)"""
return _aui.AuiMDIChildFrame_Iconize(*args, **kwargs) | [
"def",
"Iconize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiMDIChildFrame_Iconize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L1570-L1572 | |
google/or-tools | 2cb85b4eead4c38e1c54b48044f92087cf165bce | examples/python/transit_time.py | python | DataProblem.time_per_demand_unit | (self) | return 5 | Gets the time (in min) to load a demand | Gets the time (in min) to load a demand | [
"Gets",
"the",
"time",
"(",
"in",
"min",
")",
"to",
"load",
"a",
"demand"
] | def time_per_demand_unit(self):
"""Gets the time (in min) to load a demand"""
return 5 | [
"def",
"time_per_demand_unit",
"(",
"self",
")",
":",
"return",
"5"
] | https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/examples/python/transit_time.py#L133-L135 | |
dscharrer/innoextract | 5519d364cc8898f906f6285d81a87ab8c5469cde | cmake/cpplint.py | python | IsBlankLine | (line) | return not line or line.isspace() | Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank. | Returns true if the given line is blank. | [
"Returns",
"true",
"if",
"the",
"given",
"line",
"is",
"blank",
"."
] | def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace() | [
"def",
"IsBlankLine",
"(",
"line",
")",
":",
"return",
"not",
"line",
"or",
"line",
".",
"isspace",
"(",
")"
] | https://github.com/dscharrer/innoextract/blob/5519d364cc8898f906f6285d81a87ab8c5469cde/cmake/cpplint.py#L1629-L1641 | |
RobotLocomotion/drake | 0e18a34604c45ed65bc9018a54f7610f91cdad5b | tools/workspace/drake_visualizer/_drake_visualizer_builtin_scripts/show_image.py | python | vtk_image_to_numpy | (image) | return data | Gets a properly shaped NumPy view of a VTK image's memory with the storage
format `(h, w, num_channels)`.
@note This coincides with most other NumPy-based image libraries (OpenCV,
matplotlib, scipy). | Gets a properly shaped NumPy view of a VTK image's memory with the storage
format `(h, w, num_channels)`. | [
"Gets",
"a",
"properly",
"shaped",
"NumPy",
"view",
"of",
"a",
"VTK",
"image",
"s",
"memory",
"with",
"the",
"storage",
"format",
"(",
"h",
"w",
"num_channels",
")",
"."
] | def vtk_image_to_numpy(image):
"""
Gets a properly shaped NumPy view of a VTK image's memory with the storage
format `(h, w, num_channels)`.
@note This coincides with most other NumPy-based image libraries (OpenCV,
matplotlib, scipy).
"""
data = vtk_to_numpy(image.GetPointData().GetScalars())
data.shape = get_vtk_image_shape(image)
return data | [
"def",
"vtk_image_to_numpy",
"(",
"image",
")",
":",
"data",
"=",
"vtk_to_numpy",
"(",
"image",
".",
"GetPointData",
"(",
")",
".",
"GetScalars",
"(",
")",
")",
"data",
".",
"shape",
"=",
"get_vtk_image_shape",
"(",
"image",
")",
"return",
"data"
] | https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/tools/workspace/drake_visualizer/_drake_visualizer_builtin_scripts/show_image.py#L305-L315 | |
ceph/ceph | 959663007321a369c83218414a29bd9dbc8bda3a | qa/tasks/cephfs/filesystem.py | python | Filesystem.wait_for_daemons | (self, timeout=None, skip_max_mds_check=False, status=None) | Wait until all daemons are healthy
:return: | Wait until all daemons are healthy
:return: | [
"Wait",
"until",
"all",
"daemons",
"are",
"healthy",
":",
"return",
":"
] | def wait_for_daemons(self, timeout=None, skip_max_mds_check=False, status=None):
"""
Wait until all daemons are healthy
:return:
"""
if timeout is None:
timeout = DAEMON_WAIT_TIMEOUT
if status is None:
status = self.status()
elapsed = 0
while True:
if self.are_daemons_healthy(status=status, skip_max_mds_check=skip_max_mds_check):
return status
else:
time.sleep(1)
elapsed += 1
if elapsed > timeout:
log.debug("status = {0}".format(status))
raise RuntimeError("Timed out waiting for MDS daemons to become healthy")
status = self.status() | [
"def",
"wait_for_daemons",
"(",
"self",
",",
"timeout",
"=",
"None",
",",
"skip_max_mds_check",
"=",
"False",
",",
"status",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"timeout",
"=",
"DAEMON_WAIT_TIMEOUT",
"if",
"status",
"is",
"None",
":"... | https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/cephfs/filesystem.py#L1066-L1090 | ||
microsoft/checkedc-clang | a173fefde5d7877b7750e7ce96dd08cf18baebf2 | lldb/third_party/Python/module/pexpect-4.6/pexpect/FSM.py | python | FSM.add_transition_list | (self, list_input_symbols, state, action=None, next_state=None) | This adds the same transition for a list of input symbols.
You can pass a list or a string. Note that it is handy to use
string.digits, string.whitespace, string.letters, etc. to add
transitions that match character classes.
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. | This adds the same transition for a list of input symbols.
You can pass a list or a string. Note that it is handy to use
string.digits, string.whitespace, string.letters, etc. to add
transitions that match character classes. | [
"This",
"adds",
"the",
"same",
"transition",
"for",
"a",
"list",
"of",
"input",
"symbols",
".",
"You",
"can",
"pass",
"a",
"list",
"or",
"a",
"string",
".",
"Note",
"that",
"it",
"is",
"handy",
"to",
"use",
"string",
".",
"digits",
"string",
".",
"wh... | def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
'''This adds the same transition for a list of input symbols.
You can pass a list or a string. Note that it is handy to use
string.digits, string.whitespace, string.letters, etc. to add
transitions that match character classes.
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. '''
if next_state is None:
next_state = state
for input_symbol in list_input_symbols:
self.add_transition (input_symbol, state, action, next_state) | [
"def",
"add_transition_list",
"(",
"self",
",",
"list_input_symbols",
",",
"state",
",",
"action",
"=",
"None",
",",
"next_state",
"=",
"None",
")",
":",
"if",
"next_state",
"is",
"None",
":",
"next_state",
"=",
"state",
"for",
"input_symbol",
"in",
"list_in... | https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/lldb/third_party/Python/module/pexpect-4.6/pexpect/FSM.py#L148-L162 | ||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/ops/math_ops.py | python | saturate_cast | (value, dtype, name=None) | Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`. | Performs a safe saturating cast of `value` to `dtype`. | [
"Performs",
"a",
"safe",
"saturating",
"cast",
"of",
"value",
"to",
"dtype",
"."
] | def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.op_scope([value], name, "saturate_cast") as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value, ops.convert_to_tensor(
dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value, ops.convert_to_tensor(
dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name) | [
"def",
"saturate_cast",
"(",
"value",
",",
"dtype",
",",
"name",
"=",
"None",
")",
":",
"# When casting to a type with smaller representable range, clamp.",
"# Note that this covers casting to unsigned types as well.",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"value",
"]",... | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/math_ops.py#L628-L654 | ||
lmb-freiburg/ogn | 974f72ef4bf840d6f6693d22d1843a79223e77ce | scripts/cpp_lint.py | python | CheckSectionSpacing | (filename, clean_lines, class_info, linenum, error) | Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks for additional blank line issues related to sections. | [
"Checks",
"for",
"additional",
"blank",
"line",
"issues",
"related",
"to",
"sections",
"."
] | def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1)) | [
"def",
"CheckSectionSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"class_info",
",",
"linenum",
",",
"error",
")",
":",
"# Skip checks if the class is small, where small means 25 lines or less.",
"# 25 lines seems like a good cutoff since that's the usual height of",
"# termina... | https://github.com/lmb-freiburg/ogn/blob/974f72ef4bf840d6f6693d22d1843a79223e77ce/scripts/cpp_lint.py#L2991-L3043 | ||
PaddlePaddle/PaddleOCR | b756bf5f8c90142e0d89d3db0163965c686b6ffe | ppocr/data/imaug/rec_img_aug.py | python | warp | (img, ang, use_tia=True, prob=0.4) | return new_img | warp | warp | [
"warp"
] | def warp(img, ang, use_tia=True, prob=0.4):
"""
warp
"""
h, w, _ = img.shape
config = Config(use_tia=use_tia)
config.make(w, h, ang)
new_img = img
if config.distort:
img_height, img_width = img.shape[0:2]
if random.random() <= prob and img_height >= 20 and img_width >= 20:
new_img = tia_distort(new_img, random.randint(3, 6))
if config.stretch:
img_height, img_width = img.shape[0:2]
if random.random() <= prob and img_height >= 20 and img_width >= 20:
new_img = tia_stretch(new_img, random.randint(3, 6))
if config.perspective:
if random.random() <= prob:
new_img = tia_perspective(new_img)
if config.crop:
img_height, img_width = img.shape[0:2]
if random.random() <= prob and img_height >= 20 and img_width >= 20:
new_img = get_crop(new_img)
if config.blur:
if random.random() <= prob:
new_img = blur(new_img)
if config.color:
if random.random() <= prob:
new_img = cvtColor(new_img)
if config.jitter:
new_img = jitter(new_img)
if config.noise:
if random.random() <= prob:
new_img = add_gasuss_noise(new_img)
if config.reverse:
if random.random() <= prob:
new_img = 255 - new_img
return new_img | [
"def",
"warp",
"(",
"img",
",",
"ang",
",",
"use_tia",
"=",
"True",
",",
"prob",
"=",
"0.4",
")",
":",
"h",
",",
"w",
",",
"_",
"=",
"img",
".",
"shape",
"config",
"=",
"Config",
"(",
"use_tia",
"=",
"use_tia",
")",
"config",
".",
"make",
"(",
... | https://github.com/PaddlePaddle/PaddleOCR/blob/b756bf5f8c90142e0d89d3db0163965c686b6ffe/ppocr/data/imaug/rec_img_aug.py#L491-L533 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/html2.py | python | WebView.GetZoomType | (*args, **kwargs) | return _html2.WebView_GetZoomType(*args, **kwargs) | GetZoomType(self) -> int | GetZoomType(self) -> int | [
"GetZoomType",
"(",
"self",
")",
"-",
">",
"int"
] | def GetZoomType(*args, **kwargs):
"""GetZoomType(self) -> int"""
return _html2.WebView_GetZoomType(*args, **kwargs) | [
"def",
"GetZoomType",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html2",
".",
"WebView_GetZoomType",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/html2.py#L314-L316 | |
esa/pykep | b410363653623730b577de257c04b0e0289f2014 | pykep/trajopt/_indirect.py | python | _indirect_base._plot_traj | (self, z, axes, units) | This function will be redefined in the inheriting classes | This function will be redefined in the inheriting classes | [
"This",
"function",
"will",
"be",
"redefined",
"in",
"the",
"inheriting",
"classes"
] | def _plot_traj(self, z, axes, units):
"""This function will be redefined in the inheriting classes
"""
pass | [
"def",
"_plot_traj",
"(",
"self",
",",
"z",
",",
"axes",
",",
"units",
")",
":",
"pass"
] | https://github.com/esa/pykep/blob/b410363653623730b577de257c04b0e0289f2014/pykep/trajopt/_indirect.py#L42-L45 | ||
wywu/LAB | 4b6debd302ae109fd104d4dd04dccc3418ae7471 | python/caffe/classifier.py | python | Classifier.predict | (self, inputs, oversample=True) | return predictions | Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes. | Predict classification probabilities of inputs. | [
"Predict",
"classification",
"probabilities",
"of",
"inputs",
"."
] | def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
if oversample:
# Generate center, corner, and mirrored crops.
input_ = caffe.io.oversample(input_, self.crop_dims)
else:
# Take center crop.
center = np.array(self.image_dims) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-self.crop_dims / 2.0,
self.crop_dims / 2.0
])
crop = crop.astype(int)
input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# For oversampling, average predictions across crops.
if oversample:
predictions = predictions.reshape((len(predictions) // 10, 10, -1))
predictions = predictions.mean(1)
return predictions | [
"def",
"predict",
"(",
"self",
",",
"inputs",
",",
"oversample",
"=",
"True",
")",
":",
"# Scale to standardize input dimensions.",
"input_",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"inputs",
")",
",",
"self",
".",
"image_dims",
"[",
"0",
"]",
","... | https://github.com/wywu/LAB/blob/4b6debd302ae109fd104d4dd04dccc3418ae7471/python/caffe/classifier.py#L47-L98 | |
vtraag/louvain-igraph | 124ea1be49ee74eec2eaca8006599d7fc5560db6 | src/louvain/Optimiser.py | python | Optimiser.set_rng_seed | (self, value) | Set the random seed for the random number generator.
Parameters
----------
value
The integer seed used in the random number generator | Set the random seed for the random number generator. | [
"Set",
"the",
"random",
"seed",
"for",
"the",
"random",
"number",
"generator",
"."
] | def set_rng_seed(self, value):
""" Set the random seed for the random number generator.
Parameters
----------
value
The integer seed used in the random number generator
"""
_c_louvain._Optimiser_set_rng_seed(self._optimiser, value) | [
"def",
"set_rng_seed",
"(",
"self",
",",
"value",
")",
":",
"_c_louvain",
".",
"_Optimiser_set_rng_seed",
"(",
"self",
".",
"_optimiser",
",",
"value",
")"
] | https://github.com/vtraag/louvain-igraph/blob/124ea1be49ee74eec2eaca8006599d7fc5560db6/src/louvain/Optimiser.py#L76-L84 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/urllib3/__init__.py | python | disable_warnings | (category=exceptions.HTTPWarning) | Helper for quickly disabling all urllib3 warnings. | [] | def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter("ignore", category) | [
"def",
"disable_warnings",
"(",
"category",
"=",
"exceptions",
".",
"HTTPWarning",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/urllib3/__init__.py#L161-L169 | |||
apache/trafodion | 8455c839ad6b6d7b6e04edda5715053095b78046 | install/python-installer/scripts/common.py | python | ParseJson.save | (self, dic) | return 0 | save dict to json file with pretty format | save dict to json file with pretty format | [
"save",
"dict",
"to",
"json",
"file",
"with",
"pretty",
"format"
] | def save(self, dic):
""" save dict to json file with pretty format """
with open(self.__js_file, 'w') as f:
f.write(json.dumps(dic, indent=4))
return 0 | [
"def",
"save",
"(",
"self",
",",
"dic",
")",
":",
"with",
"open",
"(",
"self",
".",
"__js_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"dic",
",",
"indent",
"=",
"4",
")",
")",
"return",
"0"
] | https://github.com/apache/trafodion/blob/8455c839ad6b6d7b6e04edda5715053095b78046/install/python-installer/scripts/common.py#L568-L572 | |
macchina-io/macchina.io | ef24ba0e18379c3dd48fb84e6dbf991101cb8db0 | platform/JS/V8/v8/third_party/jinja2/compiler.py | python | UndeclaredNameVisitor.visit_Block | (self, node) | Stop visiting a blocks. | Stop visiting a blocks. | [
"Stop",
"visiting",
"a",
"blocks",
"."
] | def visit_Block(self, node):
"""Stop visiting a blocks.""" | [
"def",
"visit_Block",
"(",
"self",
",",
"node",
")",
":"
] | https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/third_party/jinja2/compiler.py#L268-L269 | ||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/multiprocessing/__init__.py | python | allow_connection_pickling | () | Install support for sending connections and sockets between processes | Install support for sending connections and sockets between processes | [
"Install",
"support",
"for",
"sending",
"connections",
"and",
"sockets",
"between",
"processes"
] | def allow_connection_pickling():
'''
Install support for sending connections and sockets between processes
'''
from multiprocessing import reduction | [
"def",
"allow_connection_pickling",
"(",
")",
":",
"from",
"multiprocessing",
"import",
"reduction"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/multiprocessing/__init__.py#L161-L165 | ||
happynear/caffe-windows | 967eedf25009e334b7f6f933bb5e17aaaff5bef6 | scripts/cpp_lint.py | python | FindNextMultiLineCommentEnd | (lines, lineix) | return len(lines) | We are inside a comment, find the end marker. | We are inside a comment, find the end marker. | [
"We",
"are",
"inside",
"a",
"comment",
"find",
"the",
"end",
"marker",
"."
] | def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines) | [
"def",
"FindNextMultiLineCommentEnd",
"(",
"lines",
",",
"lineix",
")",
":",
"while",
"lineix",
"<",
"len",
"(",
"lines",
")",
":",
"if",
"lines",
"[",
"lineix",
"]",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"'*/'",
")",
":",
"return",
"lineix",
... | https://github.com/happynear/caffe-windows/blob/967eedf25009e334b7f6f933bb5e17aaaff5bef6/scripts/cpp_lint.py#L1138-L1144 | |
locusrobotics/robot_navigation | d0ebe153518a827622baf05f8a20508dc05dfe44 | robot_nav_tools/rqt_dwb_plugin/src/rqt_dwb_plugin/multi_topic_view.py | python | MultiTopicView.__init__ | (self, timeline, parent, main_topic, window=0.5) | Constructor.
main_topic provides the topic of the messages to focus on.
window provides the number of seconds for the surrounding time window. | Constructor. | [
"Constructor",
"."
] | def __init__(self, timeline, parent, main_topic, window=0.5):
"""Constructor.
main_topic provides the topic of the messages to focus on.
window provides the number of seconds for the surrounding time window.
"""
super(MultiTopicView, self).__init__(timeline, parent, main_topic)
self.main_topic = self.topic # Equivalent to main_topic
self.window = rospy.Duration(window)
# confirm extra topics are in the bag
self.extra_topic_callbacks = {}
found_topics = timeline._get_topics()
missing_topics = []
for extra_topic, datatype, callback in self.get_extra_topics():
if extra_topic not in found_topics:
missing_topics.append(extra_topic)
found_datatype = timeline.get_datatype(extra_topic)
if type(datatype) == list:
if found_datatype not in datatype:
rospy.logwarn('The type of extra topic {} ({}) does not match the declared types: {}'.format(
extra_topic, found_datatype, ', '.join(map(str, datatype))))
continue
elif datatype != found_datatype:
rospy.logwarn('The type of extra topic {} ({}) does not match the declared type {}'.format(
extra_topic, found_datatype, datatype))
continue
self.extra_topic_callbacks[extra_topic] = callback
if missing_topics:
rospy.logwarn('The following extra_topics were not found in the bag: ' + ', '.join(missing_topics)) | [
"def",
"__init__",
"(",
"self",
",",
"timeline",
",",
"parent",
",",
"main_topic",
",",
"window",
"=",
"0.5",
")",
":",
"super",
"(",
"MultiTopicView",
",",
"self",
")",
".",
"__init__",
"(",
"timeline",
",",
"parent",
",",
"main_topic",
")",
"self",
"... | https://github.com/locusrobotics/robot_navigation/blob/d0ebe153518a827622baf05f8a20508dc05dfe44/robot_nav_tools/rqt_dwb_plugin/src/rqt_dwb_plugin/multi_topic_view.py#L56-L88 | ||
etotheipi/BitcoinArmory | 2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98 | txjsonrpc/jsonrpc.py | python | addIntrospection | (jsonrpc) | Add Introspection support to an JSONRPC server.
@param jsonrpc: The jsonrpc server to add Introspection support to. | Add Introspection support to an JSONRPC server. | [
"Add",
"Introspection",
"support",
"to",
"an",
"JSONRPC",
"server",
"."
] | def addIntrospection(jsonrpc):
"""
Add Introspection support to an JSONRPC server.
@param jsonrpc: The jsonrpc server to add Introspection support to.
"""
#jsonrpc.putSubHandler('system', Introspection, ('protocol',))
jsonrpc.putSubHandler('system', Introspection(jsonrpc)) | [
"def",
"addIntrospection",
"(",
"jsonrpc",
")",
":",
"#jsonrpc.putSubHandler('system', Introspection, ('protocol',))",
"jsonrpc",
".",
"putSubHandler",
"(",
"'system'",
",",
"Introspection",
"(",
"jsonrpc",
")",
")"
] | https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/txjsonrpc/jsonrpc.py#L202-L209 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/protobuf/py3/google/protobuf/internal/well_known_types.py | python | Struct.get_or_create_list | (self, key) | return self.fields[key].list_value | Returns a list for this key, creating if it didn't exist already. | Returns a list for this key, creating if it didn't exist already. | [
"Returns",
"a",
"list",
"for",
"this",
"key",
"creating",
"if",
"it",
"didn",
"t",
"exist",
"already",
"."
] | def get_or_create_list(self, key):
"""Returns a list for this key, creating if it didn't exist already."""
if not self.fields[key].HasField('list_value'):
# Clear will mark list_value modified which will indeed create a list.
self.fields[key].list_value.Clear()
return self.fields[key].list_value | [
"def",
"get_or_create_list",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"self",
".",
"fields",
"[",
"key",
"]",
".",
"HasField",
"(",
"'list_value'",
")",
":",
"# Clear will mark list_value modified which will indeed create a list.",
"self",
".",
"fields",
"[... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/internal/well_known_types.py#L789-L794 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/extern/flatnotebook.py | python | PageContainer.OnLeftDown | (self, event) | Handles the wx.EVT_LEFT_DOWN events for L{PageContainer}. | Handles the wx.EVT_LEFT_DOWN events for L{PageContainer}. | [
"Handles",
"the",
"wx",
".",
"EVT_LEFT_DOWN",
"events",
"for",
"L",
"{",
"PageContainer",
"}",
"."
] | def OnLeftDown(self, event):
""" Handles the wx.EVT_LEFT_DOWN events for L{PageContainer}. """
# Reset buttons status
self._nXButtonStatus = FNB_BTN_NONE
self._nLeftButtonStatus = FNB_BTN_NONE
self._nRightButtonStatus = FNB_BTN_NONE
self._nTabXButtonStatus = FNB_BTN_NONE
self._nArrowDownButtonStatus = FNB_BTN_NONE
self._nLeftClickZone, tabIdx = self.HitTest(event.GetPosition())
if self._nLeftClickZone == FNB_DROP_DOWN_ARROW:
self._nArrowDownButtonStatus = FNB_BTN_PRESSED
self.Refresh()
elif self._nLeftClickZone == FNB_LEFT_ARROW:
self._nLeftButtonStatus = FNB_BTN_PRESSED
self.Refresh()
elif self._nLeftClickZone == FNB_RIGHT_ARROW:
self._nRightButtonStatus = FNB_BTN_PRESSED
self.Refresh()
elif self._nLeftClickZone == FNB_X:
self._nXButtonStatus = FNB_BTN_PRESSED
self.Refresh()
elif self._nLeftClickZone == FNB_TAB_X:
self._nTabXButtonStatus = FNB_BTN_PRESSED
self.Refresh()
elif self._nLeftClickZone == FNB_TAB:
if self._iActivePage != tabIdx:
# In case the tab is disabled, we dont allow to choose it
if len(self._pagesInfoVec) > tabIdx and \
self._pagesInfoVec[tabIdx].GetEnabled():
self.FireEvent(tabIdx) | [
"def",
"OnLeftDown",
"(",
"self",
",",
"event",
")",
":",
"# Reset buttons status",
"self",
".",
"_nXButtonStatus",
"=",
"FNB_BTN_NONE",
"self",
".",
"_nLeftButtonStatus",
"=",
"FNB_BTN_NONE",
"self",
".",
"_nRightButtonStatus",
"=",
"FNB_BTN_NONE",
"self",
".",
"... | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/flatnotebook.py#L3983-L4018 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_controls.py | python | SearchCtrl.ShowSearchButton | (*args, **kwargs) | return _controls_.SearchCtrl_ShowSearchButton(*args, **kwargs) | ShowSearchButton(self, bool show)
Sets the search button visibility value on the search control. If
there is a menu attached, the search button will be visible regardless
of the search button visibility value. This has no effect in Mac OS X
v10.3 | ShowSearchButton(self, bool show) | [
"ShowSearchButton",
"(",
"self",
"bool",
"show",
")"
] | def ShowSearchButton(*args, **kwargs):
"""
ShowSearchButton(self, bool show)
Sets the search button visibility value on the search control. If
there is a menu attached, the search button will be visible regardless
of the search button visibility value. This has no effect in Mac OS X
v10.3
"""
return _controls_.SearchCtrl_ShowSearchButton(*args, **kwargs) | [
"def",
"ShowSearchButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"SearchCtrl_ShowSearchButton",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L7425-L7434 | |
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | example/gluon/super_resolution/super_resolution.py | python | get_dataset | (prefetch=False) | return [PrefetchingIter(i) for i in iters] if prefetch else iters | Download the BSDS500 dataset and return train and test iters. | Download the BSDS500 dataset and return train and test iters. | [
"Download",
"the",
"BSDS500",
"dataset",
"and",
"return",
"train",
"and",
"test",
"iters",
"."
] | def get_dataset(prefetch=False):
"""Download the BSDS500 dataset and return train and test iters."""
if path.exists(data_dir):
print(
"Directory {} already exists, skipping.\n"
"To force download and extraction, delete the directory and re-run."
"".format(data_dir),
file=sys.stderr,
)
else:
print("Downloading dataset...", file=sys.stderr)
downloaded_file = download(dataset_url, dirname=datasets_tmpdir)
print("done", file=sys.stderr)
print("Extracting files...", end="", file=sys.stderr)
os.makedirs(data_dir)
os.makedirs(tmp_dir)
with zipfile.ZipFile(downloaded_file) as archive:
archive.extractall(tmp_dir)
shutil.rmtree(datasets_tmpdir)
shutil.copytree(
path.join(tmp_dir, "BSDS500-master", "BSDS500", "data", "images"),
path.join(data_dir, "images"),
)
shutil.copytree(
path.join(tmp_dir, "BSDS500-master", "BSDS500", "data", "groundTruth"),
path.join(data_dir, "groundTruth"),
)
shutil.rmtree(tmp_dir)
print("done", file=sys.stderr)
crop_size = 256
crop_size -= crop_size % upscale_factor
input_crop_size = crop_size // upscale_factor
input_transform = [CenterCropAug((crop_size, crop_size)), ResizeAug(input_crop_size)]
target_transform = [CenterCropAug((crop_size, crop_size))]
iters = (
ImagePairIter(
path.join(data_dir, "images", "train"),
(input_crop_size, input_crop_size),
(crop_size, crop_size),
batch_size,
color_flag,
input_transform,
target_transform,
),
ImagePairIter(
path.join(data_dir, "images", "test"),
(input_crop_size, input_crop_size),
(crop_size, crop_size),
test_batch_size,
color_flag,
input_transform,
target_transform,
),
)
return [PrefetchingIter(i) for i in iters] if prefetch else iters | [
"def",
"get_dataset",
"(",
"prefetch",
"=",
"False",
")",
":",
"if",
"path",
".",
"exists",
"(",
"data_dir",
")",
":",
"print",
"(",
"\"Directory {} already exists, skipping.\\n\"",
"\"To force download and extraction, delete the directory and re-run.\"",
"\"\"",
".",
"fo... | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/example/gluon/super_resolution/super_resolution.py#L70-L131 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/xml/sax/handler.py | python | ErrorHandler.warning | (self, exception) | Handle a warning. | Handle a warning. | [
"Handle",
"a",
"warning",
"."
] | def warning(self, exception):
"Handle a warning."
print exception | [
"def",
"warning",
"(",
"self",
",",
"exception",
")",
":",
"print",
"exception"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/xml/sax/handler.py#L40-L42 | ||
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/webbrowser.py | python | _iscommand | (cmd) | return False | Return True if cmd is executable or can be found on the executable
search path. | Return True if cmd is executable or can be found on the executable
search path. | [
"Return",
"True",
"if",
"cmd",
"is",
"executable",
"or",
"can",
"be",
"found",
"on",
"the",
"executable",
"search",
"path",
"."
] | def _iscommand(cmd):
"""Return True if cmd is executable or can be found on the executable
search path."""
if _isexecutable(cmd):
return True
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if _isexecutable(exe):
return True
return False | [
"def",
"_iscommand",
"(",
"cmd",
")",
":",
"if",
"_isexecutable",
"(",
"cmd",
")",
":",
"return",
"True",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PATH\"",
")",
"if",
"not",
"path",
":",
"return",
"False",
"for",
"d",
"in",
"path",
".... | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/webbrowser.py#L121-L133 | |
moflow/moflow | 2dfb27c799c90c6caf1477508eca3eec616ef7d2 | bap/libtracewrap/libtrace/protobuf/python/google/protobuf/message.py | python | Message.SetInParent | (self) | Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design. | Mark this as present in the parent. | [
"Mark",
"this",
"as",
"present",
"in",
"the",
"parent",
"."
] | def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError | [
"def",
"SetInParent",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/moflow/moflow/blob/2dfb27c799c90c6caf1477508eca3eec616ef7d2/bap/libtracewrap/libtrace/protobuf/python/google/protobuf/message.py#L125-L132 | ||
SoarGroup/Soar | a1c5e249499137a27da60533c72969eef3b8ab6b | scons/scons-local-4.1.0/SCons/Tool/msvs.py | python | msvs_parse_version | (s) | return float(num), suite | Split a Visual Studio version, which may in fact be something like
'7.0Exp', into is version number (returned as a float) and trailing
"suite" portion. | Split a Visual Studio version, which may in fact be something like
'7.0Exp', into is version number (returned as a float) and trailing
"suite" portion. | [
"Split",
"a",
"Visual",
"Studio",
"version",
"which",
"may",
"in",
"fact",
"be",
"something",
"like",
"7",
".",
"0Exp",
"into",
"is",
"version",
"number",
"(",
"returned",
"as",
"a",
"float",
")",
"and",
"trailing",
"suite",
"portion",
"."
] | def msvs_parse_version(s):
"""
Split a Visual Studio version, which may in fact be something like
'7.0Exp', into is version number (returned as a float) and trailing
"suite" portion.
"""
num, suite = version_re.match(s).groups()
return float(num), suite | [
"def",
"msvs_parse_version",
"(",
"s",
")",
":",
"num",
",",
"suite",
"=",
"version_re",
".",
"match",
"(",
"s",
")",
".",
"groups",
"(",
")",
"return",
"float",
"(",
"num",
")",
",",
"suite"
] | https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Tool/msvs.py#L131-L138 | |
KratosMultiphysics/Kratos | 0000833054ed0503424eb28205d6508d9ca6cbbc | applications/ContactStructuralMechanicsApplication/python_scripts/custom_sympy_fe_utilities.py | python | DefineShapeFunctions | (nnodes, dim, impose_partion_of_unity = False) | return sympy_fe_utilities.DefineShapeFunctions(nnodes, dim, impose_partion_of_unity) | This method defines shape functions and derivatives
Note that partition of unity is imposed
the name HAS TO BE --> N and DN
Keyword arguments:
nnodes -- Number of nodes
dim -- Dimension of the space
impose_partion_of_unity -- Impose the partition of unity | This method defines shape functions and derivatives
Note that partition of unity is imposed
the name HAS TO BE --> N and DN | [
"This",
"method",
"defines",
"shape",
"functions",
"and",
"derivatives",
"Note",
"that",
"partition",
"of",
"unity",
"is",
"imposed",
"the",
"name",
"HAS",
"TO",
"BE",
"--",
">",
"N",
"and",
"DN"
] | def DefineShapeFunctions(nnodes, dim, impose_partion_of_unity = False):
""" This method defines shape functions and derivatives
Note that partition of unity is imposed
the name HAS TO BE --> N and DN
Keyword arguments:
nnodes -- Number of nodes
dim -- Dimension of the space
impose_partion_of_unity -- Impose the partition of unity
"""
return sympy_fe_utilities.DefineShapeFunctions(nnodes, dim, impose_partion_of_unity) | [
"def",
"DefineShapeFunctions",
"(",
"nnodes",
",",
"dim",
",",
"impose_partion_of_unity",
"=",
"False",
")",
":",
"return",
"sympy_fe_utilities",
".",
"DefineShapeFunctions",
"(",
"nnodes",
",",
"dim",
",",
"impose_partion_of_unity",
")"
] | https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/applications/ContactStructuralMechanicsApplication/python_scripts/custom_sympy_fe_utilities.py#L72-L82 | |
turi-code/SFrame | 796b9bdfb2fa1b881d82080754643c7e68629cd2 | oss_src/unity/python/sframe/data_structures/sarray.py | python | SArray.rolling_count | (self, window_start, window_end) | return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0)) | Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is excecuted on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1] | Count the number of non-NULL values of different subsets over this
SArray. | [
"Count",
"the",
"number",
"of",
"non",
"-",
"NULL",
"values",
"of",
"different",
"subsets",
"over",
"this",
"SArray",
"."
] | def rolling_count(self, window_start, window_end):
"""
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is excecuted on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
"""
agg_op = '__builtin__nonnull__count__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0)) | [
"def",
"rolling_count",
"(",
"self",
",",
"window_start",
",",
"window_end",
")",
":",
"agg_op",
"=",
"'__builtin__nonnull__count__'",
"return",
"SArray",
"(",
"_proxy",
"=",
"self",
".",
"__proxy__",
".",
"builtin_rolling_apply",
"(",
"agg_op",
",",
"window_start... | https://github.com/turi-code/SFrame/blob/796b9bdfb2fa1b881d82080754643c7e68629cd2/oss_src/unity/python/sframe/data_structures/sarray.py#L3916-L3991 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/eager/backprop.py | python | _magic_gradient_function | (op_name, attr_tuple, num_inputs,
inputs, outputs, out_grads) | return grad_fn(mock_op, *out_grads) | Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list. | Calls the gradient function of the op. | [
"Calls",
"the",
"gradient",
"function",
"of",
"the",
"op",
"."
] | def _magic_gradient_function(op_name, attr_tuple, num_inputs,
inputs, outputs, out_grads):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
return grad_fn(mock_op, *out_grads) | [
"def",
"_magic_gradient_function",
"(",
"op_name",
",",
"attr_tuple",
",",
"num_inputs",
",",
"inputs",
",",
"outputs",
",",
"out_grads",
")",
":",
"mock_op",
"=",
"_MockOp",
"(",
"attr_tuple",
",",
"inputs",
",",
"outputs",
",",
"op_name",
")",
"grad_fn",
"... | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/eager/backprop.py#L88-L108 | |
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py | python | AdvancedQuantizedLayerSelector.do_quantize | (self, layer, weight_param=None) | return True | weight_param - should be name of the WeightParam field | weight_param - should be name of the WeightParam field | [
"weight_param",
"-",
"should",
"be",
"name",
"of",
"the",
"WeightParam",
"field"
] | def do_quantize(self, layer, weight_param=None):
""" weight_param - should be name of the WeightParam field
"""
ret = super(AdvancedQuantizedLayerSelector, self).do_quantize(layer)
if not ret:
return False
layer_type = layer.WhichOneof("layer")
if layer_type in self.skip_layer_types:
return False
if layer_type == "convolution":
oc = layer.convolution.outputChannels
kc = layer.convolution.kernelChannels
kh = layer.convolution.kernelSize[0]
kw = layer.convolution.kernelSize[1]
groups = layer.convolution.nGroups
counts = oc * kc * kh * kw
has_bias = layer.convolution.hasBias
if weight_param is None or weight_param == "weights":
if "depthwiseConv" in self.skip_layer_types and kc == 1 and groups > 1:
return False
if (
kc < self.minimum_conv_kernel_channels
or counts < self.minimum_conv_weight_count
):
return False
elif weight_param == "bias":
return not "bias" in self.skip_layer_types
else:
raise ValueError(
"Unrecognized quantization weight field {}".format(weight_param)
)
elif layer_type == "innerProduct" or "batchedMatmul":
if weight_param is None or weight_param == "weights":
return True
if weight_param == "bias":
return not "bias" in self.skip_layer_types
else:
raise ValueError(
"Unrecognized quantization weight field {}".format(weight_param)
)
return True | [
"def",
"do_quantize",
"(",
"self",
",",
"layer",
",",
"weight_param",
"=",
"None",
")",
":",
"ret",
"=",
"super",
"(",
"AdvancedQuantizedLayerSelector",
",",
"self",
")",
".",
"do_quantize",
"(",
"layer",
")",
"if",
"not",
"ret",
":",
"return",
"False",
... | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/quantization_utils.py#L135-L182 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/stc.py | python | StyledTextCtrl.SetTargetStart | (*args, **kwargs) | return _stc.StyledTextCtrl_SetTargetStart(*args, **kwargs) | SetTargetStart(self, int pos)
Sets the position that starts the target which is used for updating the
document without affecting the scroll position. | SetTargetStart(self, int pos) | [
"SetTargetStart",
"(",
"self",
"int",
"pos",
")"
] | def SetTargetStart(*args, **kwargs):
"""
SetTargetStart(self, int pos)
Sets the position that starts the target which is used for updating the
document without affecting the scroll position.
"""
return _stc.StyledTextCtrl_SetTargetStart(*args, **kwargs) | [
"def",
"SetTargetStart",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetTargetStart",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L3713-L3720 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/gan/python/train.py | python | _get_update_ops | (kwargs, gen_scope, dis_scope, check_for_unused_ops=True) | return gen_update_ops, dis_update_ops | Gets generator and discriminator update ops.
Args:
kwargs: A dictionary of kwargs to be passed to `create_train_op`.
`update_ops` is removed, if present.
gen_scope: A scope for the generator.
dis_scope: A scope for the discriminator.
check_for_unused_ops: A Python bool. If `True`, throw Exception if there are
unused update ops.
Returns:
A 2-tuple of (generator update ops, discriminator train ops).
Raises:
ValueError: If there are update ops outside of the generator or
discriminator scopes. | Gets generator and discriminator update ops. | [
"Gets",
"generator",
"and",
"discriminator",
"update",
"ops",
"."
] | def _get_update_ops(kwargs, gen_scope, dis_scope, check_for_unused_ops=True):
"""Gets generator and discriminator update ops.
Args:
kwargs: A dictionary of kwargs to be passed to `create_train_op`.
`update_ops` is removed, if present.
gen_scope: A scope for the generator.
dis_scope: A scope for the discriminator.
check_for_unused_ops: A Python bool. If `True`, throw Exception if there are
unused update ops.
Returns:
A 2-tuple of (generator update ops, discriminator train ops).
Raises:
ValueError: If there are update ops outside of the generator or
discriminator scopes.
"""
if 'update_ops' in kwargs:
update_ops = set(kwargs['update_ops'])
del kwargs['update_ops']
else:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
all_gen_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS, gen_scope))
all_dis_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS, dis_scope))
if check_for_unused_ops:
unused_ops = update_ops - all_gen_ops - all_dis_ops
if unused_ops:
raise ValueError('There are unused update ops: %s' % unused_ops)
gen_update_ops = list(all_gen_ops & update_ops)
dis_update_ops = list(all_dis_ops & update_ops)
return gen_update_ops, dis_update_ops | [
"def",
"_get_update_ops",
"(",
"kwargs",
",",
"gen_scope",
",",
"dis_scope",
",",
"check_for_unused_ops",
"=",
"True",
")",
":",
"if",
"'update_ops'",
"in",
"kwargs",
":",
"update_ops",
"=",
"set",
"(",
"kwargs",
"[",
"'update_ops'",
"]",
")",
"del",
"kwargs... | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/gan/python/train.py#L438-L473 | |
may0324/DeepCompression-caffe | 0aff6c1287bda4cfc7f378ed8a16524e1afabd8c | scripts/cpp_lint.py | python | ProcessLine | (filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]) | Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error | Processes a single line in the file. | [
"Processes",
"a",
"single",
"line",
"in",
"the",
"file",
"."
] | def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckCaffeAlternatives(filename, clean_lines, line, error)
CheckCaffeDataLayerSetUp(filename, clean_lines, line, error)
CheckCaffeRandom(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error) | [
"def",
"ProcessLine",
"(",
"filename",
",",
"file_extension",
",",
"clean_lines",
",",
"line",
",",
"include_state",
",",
"function_state",
",",
"nesting_state",
",",
"error",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"raw_lines",
"=",
"clean_lines"... | https://github.com/may0324/DeepCompression-caffe/blob/0aff6c1287bda4cfc7f378ed8a16524e1afabd8c/scripts/cpp_lint.py#L4600-L4642 | ||
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py | python | ServerConnection.ctcp | (self, ctcptype, target, parameter="") | Send a CTCP command. | Send a CTCP command. | [
"Send",
"a",
"CTCP",
"command",
"."
] | def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or "")) | [
"def",
"ctcp",
"(",
"self",
",",
"ctcptype",
",",
"target",
",",
"parameter",
"=",
"\"\"",
")",
":",
"ctcptype",
"=",
"ctcptype",
".",
"upper",
"(",
")",
"self",
".",
"privmsg",
"(",
"target",
",",
"\"\\001%s%s\\001\"",
"%",
"(",
"ctcptype",
",",
"para... | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py#L638-L641 | ||
alibaba/weex_js_engine | 2bdf4b6f020c1fc99c63f649718f6faf7e27fdde | jni/v8core/v8/build/gyp/pylib/gyp/generator/msvs.py | python | GenerateOutput | (target_list, target_dicts, data, params) | Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data. | Generate .sln and .vcproj files. | [
"Generate",
".",
"sln",
"and",
".",
"vcproj",
"files",
"."
] | def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = _ShardTargets(target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise Exception(error_message)
else:
print >>sys.stdout, "Warning: " + error_message | [
"def",
"GenerateOutput",
"(",
"target_list",
",",
"target_dicts",
",",
"data",
",",
"params",
")",
":",
"global",
"fixpath_prefix",
"options",
"=",
"params",
"[",
"'options'",
"]",
"# Get the project file format version back out of where we stashed it in",
"# GeneratorCalcu... | https://github.com/alibaba/weex_js_engine/blob/2bdf4b6f020c1fc99c63f649718f6faf7e27fdde/jni/v8core/v8/build/gyp/pylib/gyp/generator/msvs.py#L1804-L1873 | ||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/metrics_impl.py | python | precision_at_top_k | (labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None) | Computes precision@k of the predictions with respect to sparse labels.
Differs from `sparse_precision_at_k` in that predictions must be in the form
of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
Refer to `sparse_precision_at_k` for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
The final dimension contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes], where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled. | Computes precision@k of the predictions with respect to sparse labels. | [
"Computes",
"precision@k",
"of",
"the",
"predictions",
"with",
"respect",
"to",
"sparse",
"labels",
"."
] | def precision_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
Differs from `sparse_precision_at_k` in that predictions must be in the form
of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
Refer to `sparse_precision_at_k` for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
The final dimension contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes], where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_top_k is not '
'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.cast(predictions_idx, dtypes.int64)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
def precision_across_replicas(_, tp, fp):
return math_ops.div(tp, math_ops.add(tp, fp), name=scope)
metric = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, tp, fp)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update | [
"def",
"precision_at_top_k",
"(",
"labels",
",",
"predictions_idx",
",",
"k",
"=",
"None",
",",
"class_id",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
... | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/metrics_impl.py#L3418-L3503 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | Sizer.Add | (*args, **kwargs) | return _core_.Sizer_Add(*args, **kwargs) | Add(self, item, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> wx.SizerItem
Appends a child item to the sizer. | Add(self, item, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> wx.SizerItem | [
"Add",
"(",
"self",
"item",
"int",
"proportion",
"=",
"0",
"int",
"flag",
"=",
"0",
"int",
"border",
"=",
"0",
"PyObject",
"userData",
"=",
"None",
")",
"-",
">",
"wx",
".",
"SizerItem"
] | def Add(*args, **kwargs):
"""
Add(self, item, int proportion=0, int flag=0, int border=0,
PyObject userData=None) -> wx.SizerItem
Appends a child item to the sizer.
"""
return _core_.Sizer_Add(*args, **kwargs) | [
"def",
"Add",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Sizer_Add",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L14454-L14461 | |
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | ppapi/generators/idl_parser.py | python | IDLParser.p_top_list | (self, p) | top_list : callback_decl top_list
| describe_block top_list
| dictionary_block top_list
| enum_block top_list
| inline top_list
| interface_block top_list
| label_block top_list
| namespace top_list
| struct_block top_list
| typedef_decl top_list
| | top_list : callback_decl top_list
| describe_block top_list
| dictionary_block top_list
| enum_block top_list
| inline top_list
| interface_block top_list
| label_block top_list
| namespace top_list
| struct_block top_list
| typedef_decl top_list
| | [
"top_list",
":",
"callback_decl",
"top_list",
"|",
"describe_block",
"top_list",
"|",
"dictionary_block",
"top_list",
"|",
"enum_block",
"top_list",
"|",
"inline",
"top_list",
"|",
"interface_block",
"top_list",
"|",
"label_block",
"top_list",
"|",
"namespace",
"top_l... | def p_top_list(self, p):
"""top_list : callback_decl top_list
| describe_block top_list
| dictionary_block top_list
| enum_block top_list
| inline top_list
| interface_block top_list
| label_block top_list
| namespace top_list
| struct_block top_list
| typedef_decl top_list
| """
if len(p) > 2:
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('top_list', p) | [
"def",
"p_top_list",
"(",
"self",
",",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
">",
"2",
":",
"p",
"[",
"0",
"]",
"=",
"ListFromConcat",
"(",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"2",
"]",
")",
"if",
"self",
".",
"parse_debug",
":",
"DumpR... | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/ppapi/generators/idl_parser.py#L225-L239 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/dataview.py | python | DataViewModel.IsListModel | (*args, **kwargs) | return _dataview.DataViewModel_IsListModel(*args, **kwargs) | IsListModel(self) -> bool | IsListModel(self) -> bool | [
"IsListModel",
"(",
"self",
")",
"-",
">",
"bool"
] | def IsListModel(*args, **kwargs):
"""IsListModel(self) -> bool"""
return _dataview.DataViewModel_IsListModel(*args, **kwargs) | [
"def",
"IsListModel",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewModel_IsListModel",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/dataview.py#L677-L679 | |
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | Wrapping/Python/paraview/servermanager.py | python | CreateRenderView | (session=None, **extraArgs) | return _create_view("RenderView", session, **extraArgs) | Creates a render window on the particular session. If session
is not specified, then the active session is used, if available.
This method can also be used to initialize properties by passing
keyword arguments where the key is the name of the property. In addition
registrationGroup and registrationName (optional) can be specified (as
keyword arguments) to automatically register the proxy with the proxy
manager. | Creates a render window on the particular session. If session
is not specified, then the active session is used, if available. | [
"Creates",
"a",
"render",
"window",
"on",
"the",
"particular",
"session",
".",
"If",
"session",
"is",
"not",
"specified",
"then",
"the",
"active",
"session",
"is",
"used",
"if",
"available",
"."
] | def CreateRenderView(session=None, **extraArgs):
"""Creates a render window on the particular session. If session
is not specified, then the active session is used, if available.
This method can also be used to initialize properties by passing
keyword arguments where the key is the name of the property. In addition
registrationGroup and registrationName (optional) can be specified (as
keyword arguments) to automatically register the proxy with the proxy
manager."""
return _create_view("RenderView", session, **extraArgs) | [
"def",
"CreateRenderView",
"(",
"session",
"=",
"None",
",",
"*",
"*",
"extraArgs",
")",
":",
"return",
"_create_view",
"(",
"\"RenderView\"",
",",
"session",
",",
"*",
"*",
"extraArgs",
")"
] | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Wrapping/Python/paraview/servermanager.py#L2268-L2277 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/linalg/linalg.py | python | svd | (a, full_matrices=True, compute_uv=True, hermitian=False) | Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True | Singular Value Decomposition. | [
"Singular",
"Value",
"Decomposition",
"."
] | def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
a, wrap = _makearray(a)
if hermitian:
# note: lapack returns eigenvalues in reverse order to our contract.
# reversing is cheap by design in numpy, so we do so to be consistent
if compute_uv:
s, u = eigh(a)
s = s[..., ::-1]
u = u[..., ::-1]
# singular values are unsigned, move the sign into v
vt = transpose(u * sign(s)[..., None, :]).conjugate()
s = abs(s)
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
return s
_assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s | [
"def",
"svd",
"(",
"a",
",",
"full_matrices",
"=",
"True",
",",
"compute_uv",
"=",
"True",
",",
"hermitian",
"=",
"False",
")",
":",
"a",
",",
"wrap",
"=",
"_makearray",
"(",
"a",
")",
"if",
"hermitian",
":",
"# note: lapack returns eigenvalues in reverse or... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/linalg/linalg.py#L1459-L1640 | ||
Tencent/CMONGO | c40380caa14e05509f46993aa8b8da966b09b0b5 | buildscripts/clang_format.py | python | get_clang_format_from_linux_cache | (dest_file) | Get clang-format from mongodb's cache | Get clang-format from mongodb's cache | [
"Get",
"clang",
"-",
"format",
"from",
"mongodb",
"s",
"cache"
] | def get_clang_format_from_linux_cache(dest_file):
"""Get clang-format from mongodb's cache
"""
# Get URL
url = CLANG_FORMAT_HTTP_LINUX_CACHE
dest_dir = tempfile.gettempdir()
temp_tar_file = os.path.join(dest_dir, "temp.tar.xz")
# Download the file
print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
url, temp_tar_file))
urllib.urlretrieve(url, temp_tar_file)
extract_clang_format(temp_tar_file)
# Destination Path
shutil.move("llvm/Release/bin/clang-format", dest_file) | [
"def",
"get_clang_format_from_linux_cache",
"(",
"dest_file",
")",
":",
"# Get URL",
"url",
"=",
"CLANG_FORMAT_HTTP_LINUX_CACHE",
"dest_dir",
"=",
"tempfile",
".",
"gettempdir",
"(",
")",
"temp_tar_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dest_dir",
",",
... | https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/buildscripts/clang_format.py#L167-L184 | ||
rrwick/Unicycler | 96ffea71e3a78d63ade19d6124946773e65cf129 | unicycler/assembly_graph.py | python | AssemblyGraph.get_mean_path_depth | (self, path) | return new_depth, original_depth | Returns the mean depth for the path. If any segments in the path are bridges, their depth
isn't counted because bridges got their depth from the segments they are bridging, so to
count them would be to count that depth twice. | Returns the mean depth for the path. If any segments in the path are bridges, their depth
isn't counted because bridges got their depth from the segments they are bridging, so to
count them would be to count that depth twice. | [
"Returns",
"the",
"mean",
"depth",
"for",
"the",
"path",
".",
"If",
"any",
"segments",
"in",
"the",
"path",
"are",
"bridges",
"their",
"depth",
"isn",
"t",
"counted",
"because",
"bridges",
"got",
"their",
"depth",
"from",
"the",
"segments",
"they",
"are",
... | def get_mean_path_depth(self, path):
"""
Returns the mean depth for the path. If any segments in the path are bridges, their depth
isn't counted because bridges got their depth from the segments they are bridging, so to
count them would be to count that depth twice.
"""
non_bridge_seg_nums = [abs(x) for x in path if self.segments[abs(x)].bridge is None]
# If possible, we'd like to only use the depth from segments which haven't had their depth
# altered by being used in bridges. But if none are available (i.e. all segments have been
# used in bridges), then we go ahead and use them anyway.
original_depth_seg_nums = [x for x in non_bridge_seg_nums
if self.segments[x].original_depth]
if original_depth_seg_nums:
segs_nums_for_depth = original_depth_seg_nums
original_depth = True
else:
segs_nums_for_depth = non_bridge_seg_nums
original_depth = False
depths = [self.segments[x].depth for x in segs_nums_for_depth]
lengths = [self.segments[x].get_length() - self.overlap for x in segs_nums_for_depth]
if sum(lengths) > 0.0:
new_depth = weighted_average_list(depths, lengths)
else:
new_depth = 1.0
return new_depth, original_depth | [
"def",
"get_mean_path_depth",
"(",
"self",
",",
"path",
")",
":",
"non_bridge_seg_nums",
"=",
"[",
"abs",
"(",
"x",
")",
"for",
"x",
"in",
"path",
"if",
"self",
".",
"segments",
"[",
"abs",
"(",
"x",
")",
"]",
".",
"bridge",
"is",
"None",
"]",
"# I... | https://github.com/rrwick/Unicycler/blob/96ffea71e3a78d63ade19d6124946773e65cf129/unicycler/assembly_graph.py#L574-L600 | |
zyq8709/DexHunter | 9d829a9f6f608ebad26923f29a294ae9c68d0441 | art/tools/cpplint.py | python | CheckCheck | (filename, clean_lines, linenum, error) | Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Checks the use of CHECK and EXPECT macros. | [
"Checks",
"the",
"use",
"of",
"CHECK",
"and",
"EXPECT",
"macros",
"."
] | def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[linenum].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[linenum] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if ReplaceableCheck(operator, current_macro, line):
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break | [
"def",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"# Decide the set of replacement macros that should be suggested",
"raw_lines",
"=",
"clean_lines",
".",
"raw_lines",
"current_macro",
"=",
"''",
"for",
"macro",
"in",
"_... | https://github.com/zyq8709/DexHunter/blob/9d829a9f6f608ebad26923f29a294ae9c68d0441/art/tools/cpplint.py#L2715-L2745 | ||
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/_extends/parse/parser.py | python | Parser.is_supported_namespace_module | (self, value) | return False | To check if the module is allowed to support. | To check if the module is allowed to support. | [
"To",
"check",
"if",
"the",
"module",
"is",
"allowed",
"to",
"support",
"."
] | def is_supported_namespace_module(self, value):
"""To check if the module is allowed to support."""
# Check `mindspore` namespace.
if not hasattr(value, '__name__'):
logger.debug(f"'{str(value)}' has no '__name__' attribute, we suppose it's supported.")
return True
name = value.__name__
if name == 'mindspore':
logger.debug(f"Found 'mindspore' root namespace.")
return True
if name == 'mindspore.ops':
logger.debug(f"Found 'mindspore.ops' namespace.")
return True
if name == 'mindspore.nn':
logger.debug(f"Found 'mindspore.nn' namespace.")
return True
if name == 'mindspore.numpy':
logger.debug(f"Found 'mindspore.numpy' namespace.")
return True
if name == 'mindspore.context':
logger.debug(f"Found 'mindspore.context' namespace.")
return True
# Check `builtins` namespace.
if hasattr(value, '__module__'): # Not types.ModuleType
mod = value.__module__
if mod == 'builtins':
logger.debug(f"Found '{name}' in 'builtins' namespace.")
return True
# We suppose it's supported if not a Module.
if not isinstance(value, types.ModuleType):
logger.debug(f"Found '{name}', not a module.")
return True
# Check supported Module namespace.
rightmost_name = name.split('.')[-1]
if rightmost_name in self.ms_ops_ns:
logger.debug(f"Found '{name}'({rightmost_name}) in ops namespace: {str(self.ms_ops_ns)}.")
return True
if rightmost_name in self.ms_ops_c_ns:
logger.debug(f"Found '{name}'({rightmost_name}) in C namespace: {str(self.ms_ops_c_ns)}.")
return True
if rightmost_name in self.ms_ops_c_multitype_ns:
logger.debug(
f"Found '{name}'({rightmost_name}) in C.multitype namespace: {str(self.ms_ops_c_multitype_ns)}.")
return True
if rightmost_name in self.ms_ops_p_ns:
logger.debug(f"Found '{name}'({rightmost_name}) in P namespace: {str(self.ms_ops_p_ns)}.")
return True
if rightmost_name in self.ms_common_ns:
logger.debug(f"Found '{name}'({rightmost_name}) in common namespace: {str(self.ms_common_ns)}.")
return True
# Support nn.layer. To check if exclude other module.
if rightmost_name in self.ms_nn_ns:
logger.debug(f"Found '{name}'({rightmost_name}) in nn namespace: {str(self.ms_nn_ns)}.")
return True
if rightmost_name in trope_ns:
logger.debug(f"Found '{name}'({rightmost_name}) in trope namespace: {str(trope_ns)}.")
return True
logger.info(f"Not found '{name}' in mindspore supported namespace.")
return False | [
"def",
"is_supported_namespace_module",
"(",
"self",
",",
"value",
")",
":",
"# Check `mindspore` namespace.",
"if",
"not",
"hasattr",
"(",
"value",
",",
"'__name__'",
")",
":",
"logger",
".",
"debug",
"(",
"f\"'{str(value)}' has no '__name__' attribute, we suppose it's s... | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/_extends/parse/parser.py#L693-L755 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/py_vulcanize/third_party/rcssmin/bench/cssmin.py | python | condense_semicolons | (css) | return re.sub(r";;+", ";", css) | Condense multiple adjacent semicolon characters into one. | Condense multiple adjacent semicolon characters into one. | [
"Condense",
"multiple",
"adjacent",
"semicolon",
"characters",
"into",
"one",
"."
] | def condense_semicolons(css):
"""Condense multiple adjacent semicolon characters into one."""
return re.sub(r";;+", ";", css) | [
"def",
"condense_semicolons",
"(",
"css",
")",
":",
"return",
"re",
".",
"sub",
"(",
"r\";;+\"",
",",
"\";\"",
",",
"css",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/py_vulcanize/third_party/rcssmin/bench/cssmin.py#L186-L189 | |
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/tools/grit/grit/node/node_io.py | python | EmitNode.GetEmitType | (self) | return self.attrs['emit_type'] | Returns the emit_type for this node. Default is 'append'. | Returns the emit_type for this node. Default is 'append'. | [
"Returns",
"the",
"emit_type",
"for",
"this",
"node",
".",
"Default",
"is",
"append",
"."
] | def GetEmitType(self):
'''Returns the emit_type for this node. Default is 'append'.'''
return self.attrs['emit_type'] | [
"def",
"GetEmitType",
"(",
"self",
")",
":",
"return",
"self",
".",
"attrs",
"[",
"'emit_type'",
"]"
] | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/tools/grit/grit/node/node_io.py#L115-L117 | |
deepmind/reverb | ef3c8f0be1b720a741d2dee335e15e44668c291a | reverb/server_executable/server_from_proto.py | python | selector_from_proto | (
s: schema_pb2.KeyDistributionOptions
) | Convert protobuf to reverb_types.SelectorType. | Convert protobuf to reverb_types.SelectorType. | [
"Convert",
"protobuf",
"to",
"reverb_types",
".",
"SelectorType",
"."
] | def selector_from_proto(
s: schema_pb2.KeyDistributionOptions
) -> reverb_types.SelectorType:
"""Convert protobuf to reverb_types.SelectorType."""
if s.fifo:
return reverb.selectors.Fifo()
elif s.uniform:
return reverb.selectors.Uniform()
elif s.lifo:
return reverb.selectors.Lifo()
elif s.WhichOneof('distribution') == 'heap':
if s.heap.min_heap:
return reverb.selectors.MinHeap()
else:
return reverb.selectors.MaxHeap()
elif s.WhichOneof('distribution') == 'prioritized':
return reverb.selectors.Prioritized(
s.prioritized.priority_exponent)
else:
simple_booleans_options = ('fifo', 'lifo', 'uniform')
if s.WhichOneof('distribution') in simple_booleans_options:
raise ValueError(f'distribution={s.WhichOneof("distribution")}'
' but the associated boolean value is false.')
else:
raise NotImplementedError(
f'distribution={s.WhichOneof("distribution")}') | [
"def",
"selector_from_proto",
"(",
"s",
":",
"schema_pb2",
".",
"KeyDistributionOptions",
")",
"->",
"reverb_types",
".",
"SelectorType",
":",
"if",
"s",
".",
"fifo",
":",
"return",
"reverb",
".",
"selectors",
".",
"Fifo",
"(",
")",
"elif",
"s",
".",
"unif... | https://github.com/deepmind/reverb/blob/ef3c8f0be1b720a741d2dee335e15e44668c291a/reverb/server_executable/server_from_proto.py#L28-L53 | ||
microsoft/EdgeML | ef9f8a77f096acbdeb941014791f8eda1c1bc35b | pytorch/edgeml_pytorch/trainer/bonsaiTrainer.py | python | BonsaiTrainer.getModelSize | (self) | return totalnnZ, totalSize, hasSparse | Function to get aimed model size | Function to get aimed model size | [
"Function",
"to",
"get",
"aimed",
"model",
"size"
] | def getModelSize(self):
'''
Function to get aimed model size
'''
nnzZ, sizeZ, sparseZ = utils.estimateNNZ(self.bonsaiObj.Z, self.sZ)
nnzW, sizeW, sparseW = utils.estimateNNZ(self.bonsaiObj.W, self.sW)
nnzV, sizeV, sparseV = utils.estimateNNZ(self.bonsaiObj.V, self.sV)
nnzT, sizeT, sparseT = utils.estimateNNZ(self.bonsaiObj.T, self.sT)
totalnnZ = (nnzZ + nnzT + nnzV + nnzW)
totalSize = (sizeZ + sizeW + sizeV + sizeT)
hasSparse = (sparseW or sparseV or sparseT or sparseZ)
return totalnnZ, totalSize, hasSparse | [
"def",
"getModelSize",
"(",
"self",
")",
":",
"nnzZ",
",",
"sizeZ",
",",
"sparseZ",
"=",
"utils",
".",
"estimateNNZ",
"(",
"self",
".",
"bonsaiObj",
".",
"Z",
",",
"self",
".",
"sZ",
")",
"nnzW",
",",
"sizeW",
",",
"sparseW",
"=",
"utils",
".",
"es... | https://github.com/microsoft/EdgeML/blob/ef9f8a77f096acbdeb941014791f8eda1c1bc35b/pytorch/edgeml_pytorch/trainer/bonsaiTrainer.py#L223-L235 | |
Smorodov/Multitarget-tracker | bee300e8bfd660c86cbeb6892c65a5b7195c9381 | thirdparty/pybind11/tools/clang/cindex.py | python | Type.get_pointee | (self) | return conf.lib.clang_getPointeeType(self) | For pointer types, returns the type of the pointee. | For pointer types, returns the type of the pointee. | [
"For",
"pointer",
"types",
"returns",
"the",
"type",
"of",
"the",
"pointee",
"."
] | def get_pointee(self):
"""
For pointer types, returns the type of the pointee.
"""
return conf.lib.clang_getPointeeType(self) | [
"def",
"get_pointee",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getPointeeType",
"(",
"self",
")"
] | https://github.com/Smorodov/Multitarget-tracker/blob/bee300e8bfd660c86cbeb6892c65a5b7195c9381/thirdparty/pybind11/tools/clang/cindex.py#L2042-L2046 | |
ceph/ceph | 959663007321a369c83218414a29bd9dbc8bda3a | qa/tasks/ceph_manager.py | python | CephManager.get_mon_status | (self, mon) | return json.loads(out) | Extract all the monitor status information from the cluster | Extract all the monitor status information from the cluster | [
"Extract",
"all",
"the",
"monitor",
"status",
"information",
"from",
"the",
"cluster"
] | def get_mon_status(self, mon):
"""
Extract all the monitor status information from the cluster
"""
out = self.raw_cluster_cmd('tell', 'mon.%s' % mon, 'mon_status')
return json.loads(out) | [
"def",
"get_mon_status",
"(",
"self",
",",
"mon",
")",
":",
"out",
"=",
"self",
".",
"raw_cluster_cmd",
"(",
"'tell'",
",",
"'mon.%s'",
"%",
"mon",
",",
"'mon_status'",
")",
"return",
"json",
".",
"loads",
"(",
"out",
")"
] | https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/ceph_manager.py#L3093-L3098 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/dtypes/cast.py | python | sanitize_to_nanoseconds | (values: np.ndarray, copy: bool = False) | return values | Safely convert non-nanosecond datetime64 or timedelta64 values to nanosecond. | Safely convert non-nanosecond datetime64 or timedelta64 values to nanosecond. | [
"Safely",
"convert",
"non",
"-",
"nanosecond",
"datetime64",
"or",
"timedelta64",
"values",
"to",
"nanosecond",
"."
] | def sanitize_to_nanoseconds(values: np.ndarray, copy: bool = False) -> np.ndarray:
"""
Safely convert non-nanosecond datetime64 or timedelta64 values to nanosecond.
"""
dtype = values.dtype
if dtype.kind == "M" and dtype != DT64NS_DTYPE:
values = conversion.ensure_datetime64ns(values)
elif dtype.kind == "m" and dtype != TD64NS_DTYPE:
values = conversion.ensure_timedelta64ns(values)
elif copy:
values = values.copy()
return values | [
"def",
"sanitize_to_nanoseconds",
"(",
"values",
":",
"np",
".",
"ndarray",
",",
"copy",
":",
"bool",
"=",
"False",
")",
"->",
"np",
".",
"ndarray",
":",
"dtype",
"=",
"values",
".",
"dtype",
"if",
"dtype",
".",
"kind",
"==",
"\"M\"",
"and",
"dtype",
... | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/dtypes/cast.py#L1727-L1741 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/io/formats/style_render.py | python | Tooltips._class_styles | (self) | return [
{
"selector": f".{self.class_name}",
"props": maybe_convert_css_to_tuples(self.class_properties),
}
] | Combine the ``_Tooltips`` CSS class name and CSS properties to the format
required to extend the underlying ``Styler`` `table_styles` to allow
tooltips to render in HTML.
Returns
-------
styles : List | Combine the ``_Tooltips`` CSS class name and CSS properties to the format
required to extend the underlying ``Styler`` `table_styles` to allow
tooltips to render in HTML. | [
"Combine",
"the",
"_Tooltips",
"CSS",
"class",
"name",
"and",
"CSS",
"properties",
"to",
"the",
"format",
"required",
"to",
"extend",
"the",
"underlying",
"Styler",
"table_styles",
"to",
"allow",
"tooltips",
"to",
"render",
"in",
"HTML",
"."
] | def _class_styles(self):
"""
Combine the ``_Tooltips`` CSS class name and CSS properties to the format
required to extend the underlying ``Styler`` `table_styles` to allow
tooltips to render in HTML.
Returns
-------
styles : List
"""
return [
{
"selector": f".{self.class_name}",
"props": maybe_convert_css_to_tuples(self.class_properties),
}
] | [
"def",
"_class_styles",
"(",
"self",
")",
":",
"return",
"[",
"{",
"\"selector\"",
":",
"f\".{self.class_name}\"",
",",
"\"props\"",
":",
"maybe_convert_css_to_tuples",
"(",
"self",
".",
"class_properties",
")",
",",
"}",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/formats/style_render.py#L1121-L1136 | |
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBExpressionOptions.GetTopLevel | (self) | return _lldb.SBExpressionOptions_GetTopLevel(self) | GetTopLevel(self) -> bool | GetTopLevel(self) -> bool | [
"GetTopLevel",
"(",
"self",
")",
"-",
">",
"bool"
] | def GetTopLevel(self):
"""GetTopLevel(self) -> bool"""
return _lldb.SBExpressionOptions_GetTopLevel(self) | [
"def",
"GetTopLevel",
"(",
"self",
")",
":",
"return",
"_lldb",
".",
"SBExpressionOptions_GetTopLevel",
"(",
"self",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L4263-L4265 | |
deepmind/open_spiel | 4ca53bea32bb2875c7385d215424048ae92f78c8 | open_spiel/python/pytorch/losses/rl_losses.py | python | compute_advantages | (policy_logits, action_values, use_relu=False) | return torch.sum(policy_advantages, dim=1) | Compute advantages using pi and Q. | Compute advantages using pi and Q. | [
"Compute",
"advantages",
"using",
"pi",
"and",
"Q",
"."
] | def compute_advantages(policy_logits, action_values, use_relu=False):
"""Compute advantages using pi and Q."""
# Compute advantage.
policy = F.softmax(policy_logits, dim=1)
# Avoid computing gradients for action_values.
action_values = action_values.detach()
baseline = compute_baseline(policy, action_values)
advantages = action_values - torch.unsqueeze(baseline, 1)
if use_relu:
advantages = F.relu(advantages)
# Compute advantage weighted by policy.
policy_advantages = -torch.mul(policy, advantages.detach())
return torch.sum(policy_advantages, dim=1) | [
"def",
"compute_advantages",
"(",
"policy_logits",
",",
"action_values",
",",
"use_relu",
"=",
"False",
")",
":",
"# Compute advantage.",
"policy",
"=",
"F",
".",
"softmax",
"(",
"policy_logits",
",",
"dim",
"=",
"1",
")",
"# Avoid computing gradients for action_val... | https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/pytorch/losses/rl_losses.py#L69-L84 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pyparsing.py | python | ParserElement.searchString | (self, instring, maxMatches=_MAX_INT) | Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] | Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found. | [
"Another",
"extension",
"to",
":",
"class",
":",
"scanString",
"simplifying",
"the",
"access",
"to",
"the",
"tokens",
"found",
"to",
"match",
"the",
"given",
"parse",
"expression",
".",
"May",
"be",
"called",
"with",
"optional",
"maxMatches",
"argument",
"to",... | def searchString(self, instring, maxMatches=_MAX_INT):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc | [
"def",
"searchString",
"(",
"self",
",",
"instring",
",",
"maxMatches",
"=",
"_MAX_INT",
")",
":",
"try",
":",
"return",
"ParseResults",
"(",
"[",
"t",
"for",
"t",
",",
"s",
",",
"e",
"in",
"self",
".",
"scanString",
"(",
"instring",
",",
"maxMatches",... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pyparsing.py#L2081-L2111 | ||
deepmind/open_spiel | 4ca53bea32bb2875c7385d215424048ae92f78c8 | open_spiel/python/algorithms/mcts.py | python | SearchNode.to_str | (self, state=None) | return ("{:>6}: player: {}, prior: {:5.3f}, value: {:6.3f}, sims: {:5d}, "
"outcome: {}, {:3d} children").format(
action, self.player, self.prior, self.explore_count and
self.total_reward / self.explore_count, self.explore_count,
("{:4.1f}".format(self.outcome[self.player])
if self.outcome else "none"), len(self.children)) | Returns the string representation of this node.
Args:
state: A `pyspiel.State` object, to be used to convert the action id into
a human readable format. If None, the action integer id is used. | Returns the string representation of this node. | [
"Returns",
"the",
"string",
"representation",
"of",
"this",
"node",
"."
] | def to_str(self, state=None):
"""Returns the string representation of this node.
Args:
state: A `pyspiel.State` object, to be used to convert the action id into
a human readable format. If None, the action integer id is used.
"""
action = (
state.action_to_string(state.current_player(), self.action)
if state and self.action is not None else str(self.action))
return ("{:>6}: player: {}, prior: {:5.3f}, value: {:6.3f}, sims: {:5d}, "
"outcome: {}, {:3d} children").format(
action, self.player, self.prior, self.explore_count and
self.total_reward / self.explore_count, self.explore_count,
("{:4.1f}".format(self.outcome[self.player])
if self.outcome else "none"), len(self.children)) | [
"def",
"to_str",
"(",
"self",
",",
"state",
"=",
"None",
")",
":",
"action",
"=",
"(",
"state",
".",
"action_to_string",
"(",
"state",
".",
"current_player",
"(",
")",
",",
"self",
".",
"action",
")",
"if",
"state",
"and",
"self",
".",
"action",
"is"... | https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/algorithms/mcts.py#L178-L193 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/coverage/coverage/plugin.py | python | FileTracer.has_dynamic_source_filename | (self) | return False | Does this FileTracer have dynamic source file names?
FileTracers can provide dynamically determined file names by
implementing :meth:`dynamic_source_filename`. Invoking that function
is expensive. To determine whether to invoke it, coverage.py uses the
result of this function to know if it needs to bother invoking
:meth:`dynamic_source_filename`.
See :meth:`CoveragePlugin.file_tracer` for details about static and
dynamic file names.
Returns True if :meth:`dynamic_source_filename` should be called to get
dynamic source file names. | Does this FileTracer have dynamic source file names? | [
"Does",
"this",
"FileTracer",
"have",
"dynamic",
"source",
"file",
"names?"
] | def has_dynamic_source_filename(self):
"""Does this FileTracer have dynamic source file names?
FileTracers can provide dynamically determined file names by
implementing :meth:`dynamic_source_filename`. Invoking that function
is expensive. To determine whether to invoke it, coverage.py uses the
result of this function to know if it needs to bother invoking
:meth:`dynamic_source_filename`.
See :meth:`CoveragePlugin.file_tracer` for details about static and
dynamic file names.
Returns True if :meth:`dynamic_source_filename` should be called to get
dynamic source file names.
"""
return False | [
"def",
"has_dynamic_source_filename",
"(",
"self",
")",
":",
"return",
"False"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/coverage/coverage/plugin.py#L133-L149 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/gslib/tab_complete.py | python | CloudObjectCompleter.__init__ | (self, gsutil_api, bucket_only=False) | Instantiates completer for Cloud URLs.
Args:
gsutil_api: gsutil Cloud API instance to use.
bucket_only: Whether the completer should only match buckets. | Instantiates completer for Cloud URLs. | [
"Instantiates",
"completer",
"for",
"Cloud",
"URLs",
"."
] | def __init__(self, gsutil_api, bucket_only=False):
"""Instantiates completer for Cloud URLs.
Args:
gsutil_api: gsutil Cloud API instance to use.
bucket_only: Whether the completer should only match buckets.
"""
self._gsutil_api = gsutil_api
self._bucket_only = bucket_only | [
"def",
"__init__",
"(",
"self",
",",
"gsutil_api",
",",
"bucket_only",
"=",
"False",
")",
":",
"self",
".",
"_gsutil_api",
"=",
"gsutil_api",
"self",
".",
"_bucket_only",
"=",
"bucket_only"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/gslib/tab_complete.py#L193-L201 | ||
google/llvm-propeller | 45c226984fe8377ebfb2ad7713c680d652ba678d | lldb/examples/python/crashlog.py | python | CrashLog.__init__ | (self, path, verbose) | CrashLog constructor that take a path to a darwin crash log file | CrashLog constructor that take a path to a darwin crash log file | [
"CrashLog",
"constructor",
"that",
"take",
"a",
"path",
"to",
"a",
"darwin",
"crash",
"log",
"file"
] | def __init__(self, path, verbose):
"""CrashLog constructor that take a path to a darwin crash log file"""
symbolication.Symbolicator.__init__(self)
self.path = os.path.expanduser(path)
self.info_lines = list()
self.system_profile = list()
self.threads = list()
self.backtraces = list() # For application specific backtraces
self.idents = list() # A list of the required identifiers for doing all stack backtraces
self.crashed_thread_idx = -1
self.version = -1
self.error = None
self.target = None
self.verbose = verbose
# With possible initial component of ~ or ~user replaced by that user's
# home directory.
try:
f = open(self.path)
except IOError:
self.error = 'error: cannot open "%s"' % self.path
return
self.file_lines = f.read().splitlines()
parse_mode = PARSE_MODE_NORMAL
thread = None
app_specific_backtrace = False
for line in self.file_lines:
# print line
line_len = len(line)
if line_len == 0:
if thread:
if parse_mode == PARSE_MODE_THREAD:
if thread.index == self.crashed_thread_idx:
thread.reason = ''
if self.thread_exception:
thread.reason += self.thread_exception
if self.thread_exception_data:
thread.reason += " (%s)" % self.thread_exception_data
if app_specific_backtrace:
self.backtraces.append(thread)
else:
self.threads.append(thread)
thread = None
else:
# only append an extra empty line if the previous line
# in the info_lines wasn't empty
if len(self.info_lines) > 0 and len(self.info_lines[-1]):
self.info_lines.append(line)
parse_mode = PARSE_MODE_NORMAL
# print 'PARSE_MODE_NORMAL'
elif parse_mode == PARSE_MODE_NORMAL:
if line.startswith('Process:'):
(self.process_name, pid_with_brackets) = line[
8:].strip().split(' [')
self.process_id = pid_with_brackets.strip('[]')
elif line.startswith('Path:'):
self.process_path = line[5:].strip()
elif line.startswith('Identifier:'):
self.process_identifier = line[11:].strip()
elif line.startswith('Version:'):
version_string = line[8:].strip()
matched_pair = re.search("(.+)\((.+)\)", version_string)
if matched_pair:
self.process_version = matched_pair.group(1)
self.process_compatability_version = matched_pair.group(
2)
else:
self.process = version_string
self.process_compatability_version = version_string
elif self.parent_process_regex.search(line):
parent_process_match = self.parent_process_regex.search(
line)
self.parent_process_name = parent_process_match.group(1)
self.parent_process_id = parent_process_match.group(2)
elif line.startswith('Exception Type:'):
self.thread_exception = line[15:].strip()
continue
elif line.startswith('Exception Codes:'):
self.thread_exception_data = line[16:].strip()
continue
elif line.startswith('Exception Subtype:'): # iOS
self.thread_exception_data = line[18:].strip()
continue
elif line.startswith('Crashed Thread:'):
self.crashed_thread_idx = int(line[15:].strip().split()[0])
continue
elif line.startswith('Triggered by Thread:'): # iOS
self.crashed_thread_idx = int(line[20:].strip().split()[0])
continue
elif line.startswith('Report Version:'):
self.version = int(line[15:].strip())
continue
elif line.startswith('System Profile:'):
parse_mode = PARSE_MODE_SYSTEM
continue
elif (line.startswith('Interval Since Last Report:') or
line.startswith('Crashes Since Last Report:') or
line.startswith('Per-App Interval Since Last Report:') or
line.startswith('Per-App Crashes Since Last Report:') or
line.startswith('Sleep/Wake UUID:') or
line.startswith('Anonymous UUID:')):
# ignore these
continue
elif line.startswith('Thread'):
thread_state_match = self.thread_state_regex.search(line)
if thread_state_match:
app_specific_backtrace = False
thread_state_match = self.thread_regex.search(line)
thread_idx = int(thread_state_match.group(1))
parse_mode = PARSE_MODE_THREGS
thread = self.threads[thread_idx]
else:
thread_match = self.thread_regex.search(line)
if thread_match:
app_specific_backtrace = False
parse_mode = PARSE_MODE_THREAD
thread_idx = int(thread_match.group(1))
thread = CrashLog.Thread(thread_idx, False)
continue
elif line.startswith('Binary Images:'):
parse_mode = PARSE_MODE_IMAGES
continue
elif line.startswith('Application Specific Backtrace'):
app_backtrace_match = self.app_backtrace_regex.search(line)
if app_backtrace_match:
parse_mode = PARSE_MODE_THREAD
app_specific_backtrace = True
idx = int(app_backtrace_match.group(1))
thread = CrashLog.Thread(idx, True)
elif line.startswith('Last Exception Backtrace:'): # iOS
parse_mode = PARSE_MODE_THREAD
app_specific_backtrace = True
idx = 1
thread = CrashLog.Thread(idx, True)
self.info_lines.append(line.strip())
elif parse_mode == PARSE_MODE_THREAD:
if line.startswith('Thread'):
continue
if self.null_frame_regex.search(line):
print('warning: thread parser ignored null-frame: "%s"' % line)
continue
frame_match = self.frame_regex.search(line)
if frame_match:
(frame_id, frame_img_name, _, frame_img_version, _,
frame_addr, frame_ofs) = frame_match.groups()
ident = frame_img_name
thread.add_ident(ident)
if ident not in self.idents:
self.idents.append(ident)
thread.frames.append(CrashLog.Frame(int(frame_id), int(
frame_addr, 0), frame_ofs))
else:
print('error: frame regex failed for line: "%s"' % line)
elif parse_mode == PARSE_MODE_IMAGES:
image_match = self.image_regex_uuid.search(line)
if image_match:
(img_lo, img_hi, img_name, _, img_version, _,
_, img_uuid, img_path) = image_match.groups()
image = CrashLog.DarwinImage(int(img_lo, 0), int(img_hi, 0),
img_name.strip(),
img_version.strip()
if img_version else "",
uuid.UUID(img_uuid), img_path,
self.verbose)
self.images.append(image)
else:
print("error: image regex failed for: %s" % line)
elif parse_mode == PARSE_MODE_THREGS:
stripped_line = line.strip()
# "r12: 0x00007fff6b5939c8 r13: 0x0000000007000006 r14: 0x0000000000002a03 r15: 0x0000000000000c00"
reg_values = re.findall(
'([a-zA-Z0-9]+: 0[Xx][0-9a-fA-F]+) *', stripped_line)
for reg_value in reg_values:
# print 'reg_value = "%s"' % reg_value
(reg, value) = reg_value.split(': ')
# print 'reg = "%s"' % reg
# print 'value = "%s"' % value
thread.registers[reg.strip()] = int(value, 0)
elif parse_mode == PARSE_MODE_SYSTEM:
self.system_profile.append(line)
f.close() | [
"def",
"__init__",
"(",
"self",
",",
"path",
",",
"verbose",
")",
":",
"symbolication",
".",
"Symbolicator",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"self",
".",
"info_lines",... | https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/lldb/examples/python/crashlog.py#L361-L542 | ||
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TStr_GetNullStr | () | return _snap.TStr_GetNullStr() | TStr_GetNullStr() -> TStr | TStr_GetNullStr() -> TStr | [
"TStr_GetNullStr",
"()",
"-",
">",
"TStr"
] | def TStr_GetNullStr():
"""TStr_GetNullStr() -> TStr"""
return _snap.TStr_GetNullStr() | [
"def",
"TStr_GetNullStr",
"(",
")",
":",
"return",
"_snap",
".",
"TStr_GetNullStr",
"(",
")"
] | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L11361-L11363 | |
fatih/subvim | 241b6d170597857105da219c9b7d36059e9f11fb | vim/base/YouCompleteMe/third_party/jedi/jedi/docstrings.py | python | _search_param_in_docstr | (docstr, param_str) | return None | Search `docstr` for a type of `param_str`.
>>> _search_param_in_docstr(':type param: int', 'param')
'int'
>>> _search_param_in_docstr('@type param: int', 'param')
'int'
>>> _search_param_in_docstr(
... ':type param: :class:`threading.Thread`', 'param')
'threading.Thread'
>>> _search_param_in_docstr('no document', 'param') is None
True | Search `docstr` for a type of `param_str`. | [
"Search",
"docstr",
"for",
"a",
"type",
"of",
"param_str",
"."
] | def _search_param_in_docstr(docstr, param_str):
"""
Search `docstr` for a type of `param_str`.
>>> _search_param_in_docstr(':type param: int', 'param')
'int'
>>> _search_param_in_docstr('@type param: int', 'param')
'int'
>>> _search_param_in_docstr(
... ':type param: :class:`threading.Thread`', 'param')
'threading.Thread'
>>> _search_param_in_docstr('no document', 'param') is None
True
"""
# look at #40 to see definitions of those params
patterns = [re.compile(p % re.escape(param_str))
for p in DOCSTRING_PARAM_PATTERNS]
for pattern in patterns:
match = pattern.search(docstr)
if match:
return _strip_rest_role(match.group(1))
return None | [
"def",
"_search_param_in_docstr",
"(",
"docstr",
",",
"param_str",
")",
":",
"# look at #40 to see definitions of those params",
"patterns",
"=",
"[",
"re",
".",
"compile",
"(",
"p",
"%",
"re",
".",
"escape",
"(",
"param_str",
")",
")",
"for",
"p",
"in",
"DOCS... | https://github.com/fatih/subvim/blob/241b6d170597857105da219c9b7d36059e9f11fb/vim/base/YouCompleteMe/third_party/jedi/jedi/docstrings.py#L62-L85 | |
google/syzygy | 8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5 | third_party/numpy/files/numpy/lib/format.py | python | write_array | (fp, array, version=(1,0)) | Write an array to an NPY file, including a header.
If the array is neither C-contiguous nor Fortran-contiguous AND the
file_like object is not a real file object, this function will have to
copy data in memory.
Parameters
----------
fp : file_like object
An open, writable file object, or similar object with a ``.write()``
method.
array : ndarray
The array to write to disk.
version : (int, int), optional
The version number of the format. Default: (1, 0)
Raises
------
ValueError
If the array cannot be persisted.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
are not picklable. | Write an array to an NPY file, including a header. | [
"Write",
"an",
"array",
"to",
"an",
"NPY",
"file",
"including",
"a",
"header",
"."
] | def write_array(fp, array, version=(1,0)):
"""
Write an array to an NPY file, including a header.
If the array is neither C-contiguous nor Fortran-contiguous AND the
file_like object is not a real file object, this function will have to
copy data in memory.
Parameters
----------
fp : file_like object
An open, writable file object, or similar object with a ``.write()``
method.
array : ndarray
The array to write to disk.
version : (int, int), optional
The version number of the format. Default: (1, 0)
Raises
------
ValueError
If the array cannot be persisted.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
are not picklable.
"""
if version != (1, 0):
msg = "we only support format version (1,0), not %s"
raise ValueError(msg % (version,))
fp.write(magic(*version))
write_array_header_1_0(fp, header_data_from_array_1_0(array))
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data directly.
# Instead, we will pickle it out with version 2 of the pickle protocol.
cPickle.dump(array, fp, protocol=2)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
else:
fp.write(array.T.tostring('C'))
else:
if isfileobj(fp):
array.tofile(fp)
else:
# XXX: We could probably chunk this using something like
# arrayterator.
fp.write(array.tostring('C')) | [
"def",
"write_array",
"(",
"fp",
",",
"array",
",",
"version",
"=",
"(",
"1",
",",
"0",
")",
")",
":",
"if",
"version",
"!=",
"(",
"1",
",",
"0",
")",
":",
"msg",
"=",
"\"we only support format version (1,0), not %s\"",
"raise",
"ValueError",
"(",
"msg",... | https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/lib/format.py#L365-L413 | ||
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/chigger/observers/TimerObserver.py | python | TimerObserver.addObserver | (self, event, vtkinteractor) | return vtkinteractor.AddObserver(event, self._callback) | Add the TimerEvent for this object. | Add the TimerEvent for this object. | [
"Add",
"the",
"TimerEvent",
"for",
"this",
"object",
"."
] | def addObserver(self, event, vtkinteractor):
"""
Add the TimerEvent for this object.
"""
vtkinteractor.CreateRepeatingTimer(self.getOption('duration'))
return vtkinteractor.AddObserver(event, self._callback) | [
"def",
"addObserver",
"(",
"self",
",",
"event",
",",
"vtkinteractor",
")",
":",
"vtkinteractor",
".",
"CreateRepeatingTimer",
"(",
"self",
".",
"getOption",
"(",
"'duration'",
")",
")",
"return",
"vtkinteractor",
".",
"AddObserver",
"(",
"event",
",",
"self",... | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/chigger/observers/TimerObserver.py#L29-L34 | |
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | tools/symsrc/pefile.py | python | PE.parse_export_directory | (self, rva, size) | return ExportDirData(
struct = export_dir,
symbols = exports) | Parse the export directory.
Given the rva of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
containing a tuple with the following elements:
(ordinal, symbol_address, symbol_name)
And also through a dicionary "exports_by_ordinal" whose keys
will be the ordinals and the values tuples of the from:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute. | Parse the export directory.
Given the rva of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
containing a tuple with the following elements:
(ordinal, symbol_address, symbol_name)
And also through a dicionary "exports_by_ordinal" whose keys
will be the ordinals and the values tuples of the from:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute. | [
"Parse",
"the",
"export",
"directory",
".",
"Given",
"the",
"rva",
"of",
"the",
"export",
"directory",
"it",
"will",
"process",
"all",
"its",
"entries",
".",
"The",
"exports",
"will",
"be",
"made",
"available",
"through",
"a",
"list",
"exports",
"containing"... | def parse_export_directory(self, rva, size):
"""Parse the export directory.
Given the rva of the export directory, it will process all
its entries.
The exports will be made available through a list "exports"
containing a tuple with the following elements:
(ordinal, symbol_address, symbol_name)
And also through a dicionary "exports_by_ordinal" whose keys
will be the ordinals and the values tuples of the from:
(symbol_address, symbol_name)
The symbol addresses are relative, not absolute.
"""
try:
export_dir = self.__unpack_data__(
self.__IMAGE_EXPORT_DIRECTORY_format__, self.get_data(rva),
file_offset = self.get_offset_from_rva(rva) )
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
if not export_dir:
return
try:
address_of_names = self.get_data(
export_dir.AddressOfNames, export_dir.NumberOfNames*4)
address_of_name_ordinals = self.get_data(
export_dir.AddressOfNameOrdinals, export_dir.NumberOfNames*4)
address_of_functions = self.get_data(
export_dir.AddressOfFunctions, export_dir.NumberOfFunctions*4)
except PEFormatError:
self.__warnings.append(
'Error parsing export directory at RVA: 0x%x' % ( rva ) )
return
exports = []
for i in xrange(export_dir.NumberOfNames):
symbol_name = self.get_string_at_rva(
self.get_dword_from_data(address_of_names, i))
symbol_ordinal = self.get_word_from_data(
address_of_name_ordinals, i)
if symbol_ordinal*4<len(address_of_functions):
symbol_address = self.get_dword_from_data(
address_of_functions, symbol_ordinal)
else:
# Corrupt? a bad pointer... we assume it's all
# useless, no exports
return None
# If the funcion's rva points within the export directory
# it will point to a string with the forwarded symbol's string
# instead of pointing the the function start address.
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+symbol_ordinal,
address = symbol_address,
name = symbol_name,
forwarder = forwarder_str))
ordinals = [exp.ordinal for exp in exports]
for idx in xrange(export_dir.NumberOfFunctions):
if not idx+export_dir.Base in ordinals:
symbol_address = self.get_dword_from_data(
address_of_functions,
idx)
#
# Checking for forwarder again.
#
if symbol_address>=rva and symbol_address<rva+size:
forwarder_str = self.get_string_at_rva(symbol_address)
else:
forwarder_str = None
exports.append(
ExportData(
ordinal = export_dir.Base+idx,
address = symbol_address,
name = None,
forwarder = forwarder_str))
return ExportDirData(
struct = export_dir,
symbols = exports) | [
"def",
"parse_export_directory",
"(",
"self",
",",
"rva",
",",
"size",
")",
":",
"try",
":",
"export_dir",
"=",
"self",
".",
"__unpack_data__",
"(",
"self",
".",
"__IMAGE_EXPORT_DIRECTORY_format__",
",",
"self",
".",
"get_data",
"(",
"rva",
")",
",",
"file_o... | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/symsrc/pefile.py#L2576-L2682 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/urllib2.py | python | parse_keqv_list | (l) | return parsed | Parse list of key=value strings where keys are not duplicated. | Parse list of key=value strings where keys are not duplicated. | [
"Parse",
"list",
"of",
"key",
"=",
"value",
"strings",
"where",
"keys",
"are",
"not",
"duplicated",
"."
def parse_keqv_list(l):
    """Parse a list of key=value strings where keys are not duplicated.

    Each element must contain at least one '=': the split happens on the
    first occurrence only, so values may themselves contain '='.  Values
    wrapped in double quotes have the quotes stripped.

    Returns a dict mapping each key to its (possibly unquoted) value.
    """
    parsed = {}
    for elt in l:
        k, v = elt.split('=', 1)
        # Strip surrounding double quotes.  Require at least two characters
        # so an empty value ('k=') no longer raises IndexError and a lone
        # '"' is left untouched instead of being emptied out.
        if len(v) > 1 and v[0] == '"' and v[-1] == '"':
            v = v[1:-1]
        parsed[k] = v
    return parsed
"def",
"parse_keqv_list",
"(",
"l",
")",
":",
"parsed",
"=",
"{",
"}",
"for",
"elt",
"in",
"l",
":",
"k",
",",
"v",
"=",
"elt",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"v",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"v",
"[",
"-",
"1",
"]... | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/urllib2.py#L1249-L1257 | |
zyq8709/DexHunter | 9d829a9f6f608ebad26923f29a294ae9c68d0441 | art/tools/cpplint.py | python | CheckForUnicodeReplacementCharacters | (filename, lines, error) | Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found. | Logs an error for each line containing Unicode replacement characters. | [
"Logs",
"an",
"error",
"for",
"each",
"line",
"containing",
"Unicode",
"replacement",
"characters",
"."
def CheckForUnicodeReplacementCharacters(filename, lines, error):
  """Logs an error for each line containing Unicode replacement characters.

  These indicate that either the file contained invalid UTF-8 (likely)
  or Unicode replacement characters (which it shouldn't).  Note that
  it's possible for this to throw off line numbering if the invalid
  UTF-8 occurred adjacent to a newline.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  replacement_char = u'\ufffd'
  for line_number, text in enumerate(lines):
    if replacement_char not in text:
      continue
    error(filename, line_number, 'readability/utf8', 5,
          'Line contains invalid UTF-8 (or Unicode replacement character).')
"def",
"CheckForUnicodeReplacementCharacters",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"u'\\ufffd'",
"in",
"line",
":",
"error",
"(",
"filename",
",",
"linenum",
... | https://github.com/zyq8709/DexHunter/blob/9d829a9f6f608ebad26923f29a294ae9c68d0441/art/tools/cpplint.py#L1217-L1233 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/_pydecimal.py | python | Decimal._round_ceiling | (self, prec) | Rounds up (not away from 0 if negative.) | Rounds up (not away from 0 if negative.) | [
"Rounds",
"up",
"(",
"not",
"away",
"from",
"0",
"if",
"negative",
".",
")"
] | def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec) | [
"def",
"_round_ceiling",
"(",
"self",
",",
"prec",
")",
":",
"if",
"self",
".",
"_sign",
":",
"return",
"self",
".",
"_round_down",
"(",
"prec",
")",
"else",
":",
"return",
"-",
"self",
".",
"_round_down",
"(",
"prec",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/_pydecimal.py#L1798-L1803 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/__init__.py | python | to_filename | (name) | return name.replace('-', '_') | Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'. | Convert a project or version name to its filename-escaped form | [
"Convert",
"a",
"project",
"or",
"version",
"name",
"to",
"its",
"filename",
"-",
"escaped",
"form"
def to_filename(name):
    """Convert a project or version name to its filename-escaped form

    Any '-' characters are currently replaced with '_'.
    """
    # split/join is equivalent to str.replace('-', '_') for every input,
    # including runs of dashes and the empty string.
    return '_'.join(name.split('-'))
"def",
"to_filename",
"(",
"name",
")",
":",
"return",
"name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/__init__.py#L1427-L1432 | |
gem5/gem5 | 141cc37c2d4b93959d4c249b8f7e6a8b2ef75338 | src/mem/slicc/parser.py | python | SLICC.p_param__pointer | (self, p) | param : type STAR ident | param : type STAR ident | [
"param",
":",
"type",
"STAR",
"ident"
def p_param__pointer(self, p):
    "param : type STAR ident"
    # NOTE(review): the bare string above appears to be the grammar
    # production consumed by the parser generator (PLY-style p_* rule);
    # it must stay exactly as written.
    # p[1] is the parameter's type AST and p[3] the identifier token;
    # build a formal parameter flagged as a pointer ("PTR").
    param_type, param_name = p[1], p[3]
    p[0] = ast.FormalParamAST(self, param_type, param_name, None, "PTR")
"def",
"p_param__pointer",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"FormalParamAST",
"(",
"self",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"3",
"]",
",",
"None",
",",
"\"PTR\"",
")"
] | https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/mem/slicc/parser.py#L450-L452 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/Tkinter.py | python | Text.window_configure | (self, index, cnf=None, **kw) | return self._configure(('window', 'configure', index), cnf, kw) | Configure an embedded window at INDEX. | Configure an embedded window at INDEX. | [
"Configure",
"an",
"embedded",
"window",
"at",
"INDEX",
"."
def window_configure(self, index, cnf=None, **kw):
    """Configure an embedded window at INDEX."""
    # Delegate to the generic option-configuration helper with the
    # 'window configure' subcommand prefix.
    subcommand = ('window', 'configure', index)
    return self._configure(subcommand, cnf, kw)
"def",
"window_configure",
"(",
"self",
",",
"index",
",",
"cnf",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"return",
"self",
".",
"_configure",
"(",
"(",
"'window'",
",",
"'configure'",
",",
"index",
")",
",",
"cnf",
",",
"kw",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/lib-tk/Tkinter.py#L3173-L3175 | |
netket/netket | 0d534e54ecbf25b677ea72af6b85947979420652 | netket/operator/_hamiltonian.py | python | BoseHubbard.J | (self) | return self._J | The hopping amplitude. | The hopping amplitude. | [
"The",
"hopping",
"amplitude",
"."
def J(self):
    """The hopping amplitude."""
    # Read-only accessor for the privately stored coupling ``self._J``.
    return self._J
"def",
"J",
"(",
"self",
")",
":",
"return",
"self",
".",
"_J"
] | https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/operator/_hamiltonian.py#L548-L550 | |
Slicer/SlicerGitSVNArchive | 65e92bb16c2b32ea47a1a66bee71f238891ee1ca | Base/Python/tpycl/tpycl.py | python | tpycl.py_eval | (self,cmd) | return( repr(evalResult) ) | evaluated the python command string and returns the result
- if the result is a vtk object instance, it is registered in the tcl interp
- if the result is a tuple, it is converted to a tcl-style list | evaluated the python command string and returns the result
- if the result is a vtk object instance, it is registered in the tcl interp
- if the result is a tuple, it is converted to a tcl-style list | [
"evaluated",
"the",
"python",
"command",
"string",
"and",
"returns",
"the",
"result",
"-",
"if",
"the",
"result",
"is",
"a",
"vtk",
"object",
"instance",
"it",
"is",
"registered",
"in",
"the",
"tcl",
"interp",
"-",
"if",
"the",
"result",
"is",
"a",
"tupl... | def py_eval(self,cmd):
""" evaluated the python command string and returns the result
- if the result is a vtk object instance, it is registered in the tcl interp
- if the result is a tuple, it is converted to a tcl-style list
"""
cmd = "__tpycl_result = " + cmd
try:
exec( cmd, globals() )
except:
print( "Error executing %s" % cmd )
print( sys.exc_info() )
raise
evalResult = globals()["__tpycl_result"]
try:
if evalResult.IsA("vtkObject"):
instanceName = self.py_vtkInstanceName(evalResult)
if self.tcl_eval("info command %s" % instanceName) == "":
exec ("%s = globals()['__tpycl_result']" % instanceName, globals())
self.tcl_eval( "proc ::%s {args} {tpycl::methodCaller %s %s $args}" % (instanceName, instanceName, instanceName) )
return( instanceName )
except AttributeError:
pass
try:
if evalResult.__class__.__name__ == 'tuple':
returnValue = evalResult[0]
for element in evalResult[1:]:
returnValue = "%s %s" % (returnValue, element)
return( returnValue )
except AttributeError:
pass
return( repr(evalResult) ) | [
"def",
"py_eval",
"(",
"self",
",",
"cmd",
")",
":",
"cmd",
"=",
"\"__tpycl_result = \"",
"+",
"cmd",
"try",
":",
"exec",
"(",
"cmd",
",",
"globals",
"(",
")",
")",
"except",
":",
"print",
"(",
"\"Error executing %s\"",
"%",
"cmd",
")",
"print",
"(",
... | https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Base/Python/tpycl/tpycl.py#L139-L169 | |
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/contrib/factorization/examples/mnist.py | python | run_training | () | Train MNIST for a number of steps. | Train MNIST for a number of steps. | [
"Train",
"MNIST",
"for",
"a",
"number",
"of",
"steps",
"."
def run_training():
  """Train MNIST for a number of steps.

  Builds the combined k-means + MLP graph, runs the grouped training op,
  and periodically evaluates on the train/validation/test splits.

  Returns:
    The maximum test-set precision observed over the course of training.
  """
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  train_dir = tempfile.mkdtemp()
  data_sets = input_data.read_data_sets(train_dir, FLAGS.fake_data)

  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs()

    # Build a Graph that computes predictions from the inference model.
    logits, clustering_loss, kmeans_training_op = inference(images_placeholder,
                                                            FLAGS.num_clusters,
                                                            FLAGS.hidden1,
                                                            FLAGS.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)

    # Add to the Graph the Ops that calculate and apply gradients.
    # Grouped with the k-means op so both updates run in a single step.
    train_op = tf.group(mnist.training(loss, FLAGS.learning_rate),
                        kmeans_training_op)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)

    # Add the variable initializer Op.
    init = tf.initialize_all_variables()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Initialization is fed a single large batch (presumably so the
    # k-means op can seed its clusters from real data -- confirm).
    feed_dict = fill_feed_dict(data_sets.train,
                               images_placeholder,
                               labels_placeholder,
                               batch_size=5000)
    # Run the Op to initialize the variables.
    sess.run(init, feed_dict=feed_dict)

    # Start the training loop.
    max_test_prec = 0
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()

      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train,
                                 images_placeholder,
                                 labels_placeholder,
                                 FLAGS.batch_size)

      # Run one step of the model.
      _, loss_value, clustering_loss_value = sess.run([train_op,
                                                       loss,
                                                       clustering_loss],
                                                      feed_dict=feed_dict)

      duration = time.time() - start_time
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f, clustering_loss = %.2f (%.3f sec)' % (
            step, loss_value, clustering_loss_value, duration))

      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess,
                eval_correct,
                images_placeholder,
                labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        test_prec = do_eval(sess,
                            eval_correct,
                            images_placeholder,
                            labels_placeholder,
                            data_sets.test)
        # Track the best test precision seen so far; it is the return value.
        max_test_prec = max(max_test_prec, test_prec)
  return max_test_prec
"def",
"run_training",
"(",
")",
":",
"# Get the sets of images and labels for training, validation, and",
"# test on MNIST.",
"train_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"data_sets",
"=",
"input_data",
".",
"read_data_sets",
"(",
"train_dir",
",",
"FLAGS",
... | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/factorization/examples/mnist.py#L180-L269 | ||
ideawu/ssdb-rocks | a3cbb322cafb2f493252829c608e2239df98c9ac | deps/cpy/antlr3/recognizers.py | python | Parser.setTokenStream | (self, input) | Set the token stream and reset the parser | Set the token stream and reset the parser | [
"Set",
"the",
"token",
"stream",
"and",
"reset",
"the",
"parser"
def setTokenStream(self, input):
    """Set the token stream and reset the parser"""

    # Detach the current stream first (presumably so reset() cannot act
    # on the outgoing input -- confirm), then install the new one once
    # the parser state is clean.
    self.input = None
    self.reset()
    self.input = input
"def",
"setTokenStream",
"(",
"self",
",",
"input",
")",
":",
"self",
".",
"input",
"=",
"None",
"self",
".",
"reset",
"(",
")",
"self",
".",
"input",
"=",
"input"
] | https://github.com/ideawu/ssdb-rocks/blob/a3cbb322cafb2f493252829c608e2239df98c9ac/deps/cpy/antlr3/recognizers.py#L1432-L1437 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests_oauthlib/oauth2_session.py | python | OAuth2Session.__init__ | (self, client_id=None, client=None, auto_refresh_url=None,
auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
state=None, token_updater=None, **kwargs) | Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
:auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
this if you wish the client to automatically refresh
your access tokens.
:auto_refresh_kwargs: Extra arguments to pass to the refresh token
endpoint.
:token_updater: Method with one argument, token, to be used to update
your token database on automatic token refresh. If not
set a TokenUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session constructor. | Construct a new OAuth 2 client session. | [
"Construct",
"a",
"new",
"OAuth",
"2",
"client",
"session",
"."
def __init__(self, client_id=None, client=None, auto_refresh_url=None,
             auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
             state=None, token_updater=None, **kwargs):
    """Construct a new OAuth 2 client session.

    :param client_id: Client id obtained during registration
    :param client: :class:`oauthlib.oauth2.Client` to be used. Default is
                   WebApplicationClient which is useful for any
                   hosted application but not mobile or desktop.
    :param scope: List of scopes you wish to request access to
    :param redirect_uri: Redirect URI you registered as callback
    :param token: Token dictionary, must include access_token
                  and token_type.
    :param state: State string used to prevent CSRF. This will be given
                  when creating the authorization url and must be supplied
                  when parsing the authorization response.
                  Can be either a string or a no argument callable.
    :param auto_refresh_url: Refresh token endpoint URL, must be HTTPS.
                  Supply this if you wish the client to automatically
                  refresh your access tokens.
    :param auto_refresh_kwargs: Extra arguments to pass to the refresh token
                  endpoint.
    :param token_updater: Method with one argument, token, to be used to
                  update your token database on automatic token refresh.
                  If not set a TokenUpdated warning will be raised when a
                  token has been refreshed. This warning will carry the
                  token in its token argument.
    :param kwargs: Arguments to pass to the Session constructor.
    """
    super(OAuth2Session, self).__init__(**kwargs)
    self._client = client or WebApplicationClient(client_id, token=token)
    self.token = token or {}
    self.scope = scope
    self.redirect_uri = redirect_uri
    # ``state`` may be a fixed string or a zero-argument callable; when
    # omitted, fall back to the random-token generator.
    self.state = state or generate_token
    self._state = state
    self.auto_refresh_url = auto_refresh_url
    self.auto_refresh_kwargs = auto_refresh_kwargs or {}
    self.token_updater = token_updater

    # Allow customizations for non compliant providers through various
    # hooks to adjust requests and responses.
    self.compliance_hook = {
        'access_token_response': set([]),
        'refresh_token_response': set([]),
        'protected_request': set([]),
    }
"def",
"__init__",
"(",
"self",
",",
"client_id",
"=",
"None",
",",
"client",
"=",
"None",
",",
"auto_refresh_url",
"=",
"None",
",",
"auto_refresh_kwargs",
"=",
"None",
",",
"scope",
"=",
"None",
",",
"redirect_uri",
"=",
"None",
",",
"token",
"=",
"Non... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/requests_oauthlib/oauth2_session.py#L37-L83 | ||
moflow/moflow | 2dfb27c799c90c6caf1477508eca3eec616ef7d2 | bap/libtracewrap/libtrace/protobuf/python/google/protobuf/internal/encoder.py | python | TagBytes | (field_number, wire_type) | return _VarintBytes(wire_format.PackTag(field_number, wire_type)) | Encode the given tag and return the bytes. Only called at startup. | Encode the given tag and return the bytes. Only called at startup. | [
"Encode",
"the",
"given",
"tag",
"and",
"return",
"the",
"bytes",
".",
"Only",
"called",
"at",
"startup",
"."
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes. Only called at startup."""
  # Pack the (field number, wire type) pair into a single tag integer,
  # then varint-encode it.
  packed_tag = wire_format.PackTag(field_number, wire_type)
  return _VarintBytes(packed_tag)
"def",
"TagBytes",
"(",
"field_number",
",",
"wire_type",
")",
":",
"return",
"_VarintBytes",
"(",
"wire_format",
".",
"PackTag",
"(",
"field_number",
",",
"wire_type",
")",
")"
] | https://github.com/moflow/moflow/blob/2dfb27c799c90c6caf1477508eca3eec616ef7d2/bap/libtracewrap/libtrace/protobuf/python/google/protobuf/internal/encoder.py#L388-L391 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_core.py | python | Window.PopEventHandler | (*args, **kwargs) | return _core_.Window_PopEventHandler(*args, **kwargs) | PopEventHandler(self, bool deleteHandler=False) -> EvtHandler
Removes and returns the top-most event handler on the event handler
stack. If deleteHandler is True then the wx.EvtHandler object will be
destroyed after it is popped, and ``None`` will be returned instead. | PopEventHandler(self, bool deleteHandler=False) -> EvtHandler | [
"PopEventHandler",
"(",
"self",
"bool",
"deleteHandler",
"=",
"False",
")",
"-",
">",
"EvtHandler"
def PopEventHandler(*args, **kwargs):
    """
    PopEventHandler(self, bool deleteHandler=False) -> EvtHandler

    Removes and returns the top-most event handler on the event handler
    stack. If deleteHandler is True then the wx.EvtHandler object will be
    destroyed after it is popped, and ``None`` will be returned instead.
    """
    # Thin wrapper: all work happens in the native _core_ extension module.
    return _core_.Window_PopEventHandler(*args, **kwargs)
"def",
"PopEventHandler",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Window_PopEventHandler",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L10404-L10412 | |
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/estimator.py | python | Estimator._call_model_fn | (self, features, targets, mode) | return self._model_fn(features, targets) | Calls model function with support of 2, 3 or 4 arguments. | Calls model function with support of 2, 3 or 4 arguments. | [
"Calls",
"model",
"function",
"with",
"support",
"of",
"2",
"3",
"or",
"4",
"arguments",
"."
def _call_model_fn(self, features, targets, mode):
    """Calls model function with support of 2, 3 or 4 arguments."""
    # Inspect the model_fn signature to decide which optional keyword
    # arguments ('mode', 'params') it accepts.
    accepted_args = _get_arguments(self._model_fn)
    if 'mode' not in accepted_args:
        return self._model_fn(features, targets)
    if 'params' in accepted_args:
        return self._model_fn(features, targets, mode=mode, params=self.params)
    return self._model_fn(features, targets, mode=mode)
"def",
"_call_model_fn",
"(",
"self",
",",
"features",
",",
"targets",
",",
"mode",
")",
":",
"model_fn_args",
"=",
"_get_arguments",
"(",
"self",
".",
"_model_fn",
")",
"if",
"'mode'",
"in",
"model_fn_args",
":",
"if",
"'params'",
"in",
"model_fn_args",
":"... | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/estimator.py#L725-L733 | |
KhronosGroup/SPIRV-LLVM | 1eb85593f3fe2c39379b9a9b088d51eda4f42b8b | examples/Kaleidoscope/MCJIT/cached/genk-timing.py | python | KScriptGenerator.updateCalledFunctionList | (self, callee) | Maintains a list of functions that will actually be called | Maintains a list of functions that will actually be called | [
"Maintains",
"a",
"list",
"of",
"functions",
"that",
"will",
"actually",
"be",
"called"
] | def updateCalledFunctionList(self, callee):
"""Maintains a list of functions that will actually be called"""
# Update the total call count
self.updateTotalCallCount(callee)
# If this function is already in the list, don't do anything else
if callee in self.calledFunctions:
return
# Add this function to the list of those that will be called.
self.calledFunctions.append(callee)
# If this function calls other functions, add them too
if callee in self.calledFunctionTable:
for subCallee in self.calledFunctionTable[callee]:
self.updateCalledFunctionList(subCallee) | [
"def",
"updateCalledFunctionList",
"(",
"self",
",",
"callee",
")",
":",
"# Update the total call count",
"self",
".",
"updateTotalCallCount",
"(",
"callee",
")",
"# If this function is already in the list, don't do anything else",
"if",
"callee",
"in",
"self",
".",
"called... | https://github.com/KhronosGroup/SPIRV-LLVM/blob/1eb85593f3fe2c39379b9a9b088d51eda4f42b8b/examples/Kaleidoscope/MCJIT/cached/genk-timing.py#L66-L78 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/calendar.py | python | Calendar.yeardatescalendar | (self, year, width=3) | return [months[i:i+width] for i in range(0, len(months), width) ] | Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects. | Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects. | [
"Return",
"the",
"data",
"for",
"the",
"specified",
"year",
"ready",
"for",
"formatting",
".",
"The",
"return",
"value",
"is",
"a",
"list",
"of",
"month",
"rows",
".",
"Each",
"month",
"row",
"contains",
"up",
"to",
"width",
"months",
".",
"Each",
"month... | def yeardatescalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects.
"""
months = [
self.monthdatescalendar(year, i)
for i in range(January, January+12)
]
return [months[i:i+width] for i in range(0, len(months), width) ] | [
"def",
"yeardatescalendar",
"(",
"self",
",",
"year",
",",
"width",
"=",
"3",
")",
":",
"months",
"=",
"[",
"self",
".",
"monthdatescalendar",
"(",
"year",
",",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"January",
",",
"January",
"+",
"12",
")",
"]... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/calendar.py#L254-L265 | |
panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/distributed/DistributedSmoothNode.py | python | DistributedSmoothNode.stopSmooth | (self) | This function stops the task spawned by startSmooth(), and
allows show code to move the node around directly. | This function stops the task spawned by startSmooth(), and
allows show code to move the node around directly. | [
"This",
"function",
"stops",
"the",
"task",
"spawned",
"by",
"startSmooth",
"()",
"and",
"allows",
"show",
"code",
"to",
"move",
"the",
"node",
"around",
"directly",
"."
def stopSmooth(self):
    """
    This function stops the task spawned by startSmooth(), and
    allows show code to move the node around directly.
    """
    if not self.smoothStarted:
        return
    # Tear down the smoothing task, snap to the authoritative position,
    # and mark smoothing as inactive.
    taskMgr.remove(self.taskName("smooth"))
    self.forceToTruePosition()
    self.smoothStarted = 0
"def",
"stopSmooth",
"(",
"self",
")",
":",
"if",
"self",
".",
"smoothStarted",
":",
"taskName",
"=",
"self",
".",
"taskName",
"(",
"\"smooth\"",
")",
"taskMgr",
".",
"remove",
"(",
"taskName",
")",
"self",
".",
"forceToTruePosition",
"(",
")",
"self",
"... | https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/distributed/DistributedSmoothNode.py#L143-L152 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py | python | DataFrame.reset_index | (
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Optional[Hashable] = "",
) | return None | Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump | Reset the index, or a level of it. | [
"Reset",
"the",
"index",
"or",
"a",
"level",
"of",
"it",
"."
] | def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Optional[Hashable] = "",
) -> Optional["DataFrame"]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, _ = maybe_upcast_putmask(values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, ABCMultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, ABCMultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None | [
"def",
"reset_index",
"(",
"self",
",",
"level",
":",
"Optional",
"[",
"Union",
"[",
"Hashable",
",",
"Sequence",
"[",
"Hashable",
"]",
"]",
"]",
"=",
"None",
",",
"drop",
":",
"bool",
"=",
"False",
",",
"inplace",
":",
"bool",
"=",
"False",
",",
"... | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py#L4369-L4610 | |
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/math/spline.py | python | hermite_to_bezier | (x1,v1,x2,v2) | return x1,c1,c2,x2 | Returns the cubic bezier representation of a hermite curve | Returns the cubic bezier representation of a hermite curve | [
"Returns",
"the",
"cubic",
"bezier",
"representation",
"of",
"a",
"hermite",
"curve"
] | def hermite_to_bezier(x1,v1,x2,v2):
"""Returns the cubic bezier representation of a hermite curve"""
c1 = vectorops.madd(x1,v1,1.0/3.0)
c2 = vectorops.madd(x2,v2,-1.0/3.0)
return x1,c1,c2,x2 | [
"def",
"hermite_to_bezier",
"(",
"x1",
",",
"v1",
",",
"x2",
",",
"v2",
")",
":",
"c1",
"=",
"vectorops",
".",
"madd",
"(",
"x1",
",",
"v1",
",",
"1.0",
"/",
"3.0",
")",
"c2",
"=",
"vectorops",
".",
"madd",
"(",
"x2",
",",
"v2",
",",
"-",
"1.... | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/math/spline.py#L73-L77 | |
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/tool/subtool/acquire_sourcedir.py | python | wanna_convert | () | return answer | Ask the user if assets should be converted. | Ask the user if assets should be converted. | [
"Ask",
"the",
"user",
"if",
"assets",
"should",
"be",
"converted",
"."
] | def wanna_convert():
"""
Ask the user if assets should be converted.
"""
answer = None
while answer is None:
print(" Do you want to convert assets? [Y/n]")
user_selection = input("> ")
if user_selection.lower() in {"yes", "y", ""}:
answer = True
elif user_selection.lower() in {"no", "n"}:
answer = False
return answer | [
"def",
"wanna_convert",
"(",
")",
":",
"answer",
"=",
"None",
"while",
"answer",
"is",
"None",
":",
"print",
"(",
"\" Do you want to convert assets? [Y/n]\"",
")",
"user_selection",
"=",
"input",
"(",
"\"> \"",
")",
"if",
"user_selection",
".",
"lower",
"(",
... | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/tool/subtool/acquire_sourcedir.py#L35-L50 | |
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/tools/jinja2/bccache.py | python | Bucket.reset | (self) | Resets the bucket (unloads the bytecode). | Resets the bucket (unloads the bytecode). | [
"Resets",
"the",
"bucket",
"(",
"unloads",
"the",
"bytecode",
")",
"."
] | def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"code",
"=",
"None"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/jinja2/bccache.py#L75-L77 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.