nwo stringlengths 5 86 | sha stringlengths 40 40 | path stringlengths 4 189 | language stringclasses 1 value | identifier stringlengths 1 94 | parameters stringlengths 2 4.03k | argument_list stringclasses 1 value | return_statement stringlengths 0 11.5k | docstring stringlengths 1 33.2k | docstring_summary stringlengths 0 5.15k | docstring_tokens list | function stringlengths 34 151k | function_tokens list | url stringlengths 90 278 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Arch/importIFClegacy.py | python | IfcFile.nextString | (self, s, start) | return len(s)+1 | Parse the data part of a line | Parse the data part of a line | [
"Parse",
"the",
"data",
"part",
"of",
"a",
"line"
] | def nextString(self, s, start):
"""
Parse the data part of a line
"""
parens = 0
quotes = 0
for pos in range(start,len(s)):
c = s[pos]
if c == "," and parens == 0 and quotes == 0:
return pos+1
elif c == "(" and quotes == 0:
parens += 1
elif c == ")" and quotes == 0:
parens -= 1
elif c == "\'" and quotes == 0:
quotes = 1
elif c =="\'" and quotes == 1:
quotes = 0
return len(s)+1 | [
"def",
"nextString",
"(",
"self",
",",
"s",
",",
"start",
")",
":",
"parens",
"=",
"0",
"quotes",
"=",
"0",
"for",
"pos",
"in",
"range",
"(",
"start",
",",
"len",
"(",
"s",
")",
")",
":",
"c",
"=",
"s",
"[",
"pos",
"]",
"if",
"c",
"==",
"\",\"",
"and",
"parens",
"==",
"0",
"and",
"quotes",
"==",
"0",
":",
"return",
"pos",
"+",
"1",
"elif",
"c",
"==",
"\"(\"",
"and",
"quotes",
"==",
"0",
":",
"parens",
"+=",
"1",
"elif",
"c",
"==",
"\")\"",
"and",
"quotes",
"==",
"0",
":",
"parens",
"-=",
"1",
"elif",
"c",
"==",
"\"\\'\"",
"and",
"quotes",
"==",
"0",
":",
"quotes",
"=",
"1",
"elif",
"c",
"==",
"\"\\'\"",
"and",
"quotes",
"==",
"1",
":",
"quotes",
"=",
"0",
"return",
"len",
"(",
"s",
")",
"+",
"1"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Arch/importIFClegacy.py#L1612-L1632 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/sharedctypes.py | python | Array | (typecode_or_type, size_or_initializer, *, lock=True, ctx=None) | return synchronized(obj, lock, ctx=ctx) | Return a synchronization wrapper for a RawArray | Return a synchronization wrapper for a RawArray | [
"Return",
"a",
"synchronization",
"wrapper",
"for",
"a",
"RawArray"
] | def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None):
'''
Return a synchronization wrapper for a RawArray
'''
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
ctx = ctx or get_context()
lock = ctx.RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("%r has no method 'acquire'" % lock)
return synchronized(obj, lock, ctx=ctx) | [
"def",
"Array",
"(",
"typecode_or_type",
",",
"size_or_initializer",
",",
"*",
",",
"lock",
"=",
"True",
",",
"ctx",
"=",
"None",
")",
":",
"obj",
"=",
"RawArray",
"(",
"typecode_or_type",
",",
"size_or_initializer",
")",
"if",
"lock",
"is",
"False",
":",
"return",
"obj",
"if",
"lock",
"in",
"(",
"True",
",",
"None",
")",
":",
"ctx",
"=",
"ctx",
"or",
"get_context",
"(",
")",
"lock",
"=",
"ctx",
".",
"RLock",
"(",
")",
"if",
"not",
"hasattr",
"(",
"lock",
",",
"'acquire'",
")",
":",
"raise",
"AttributeError",
"(",
"\"%r has no method 'acquire'\"",
"%",
"lock",
")",
"return",
"synchronized",
"(",
"obj",
",",
"lock",
",",
"ctx",
"=",
"ctx",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/sharedctypes.py#L84-L96 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/aui/framemanager.py | python | AuiManager.AddPane | (self, window, arg1=None, arg2=None, target=None) | Tells the frame manager to start managing a child window. There
are four versions of this function. The first verison allows the full spectrum
of pane parameter possibilities (:meth:`AddPane1`). The second version is used for
simpler user interfaces which do not require as much configuration (:meth:`AddPane2`).
The :meth:`AddPane3` version allows a drop position to be specified, which will determine
where the pane will be added. The :meth:`AddPane4` version allows to turn the target
:class:`AuiPaneInfo` pane into a notebook and the added pane into a page.
In your code, simply call :meth:`AddPane`.
:param Window `window`: the child window to manage;
:param `arg1`: a :class:`AuiPaneInfo` or an integer value (direction);
:param `arg2`: a :class:`AuiPaneInfo` or a :class:`Point` (drop position);
:param `target`: a :class:`AuiPaneInfo` to be turned into a notebook
and new pane added to it as a page. (additionally, target can be any pane in
an existing notebook) | Tells the frame manager to start managing a child window. There
are four versions of this function. The first verison allows the full spectrum
of pane parameter possibilities (:meth:`AddPane1`). The second version is used for
simpler user interfaces which do not require as much configuration (:meth:`AddPane2`).
The :meth:`AddPane3` version allows a drop position to be specified, which will determine
where the pane will be added. The :meth:`AddPane4` version allows to turn the target
:class:`AuiPaneInfo` pane into a notebook and the added pane into a page. | [
"Tells",
"the",
"frame",
"manager",
"to",
"start",
"managing",
"a",
"child",
"window",
".",
"There",
"are",
"four",
"versions",
"of",
"this",
"function",
".",
"The",
"first",
"verison",
"allows",
"the",
"full",
"spectrum",
"of",
"pane",
"parameter",
"possibilities",
"(",
":",
"meth",
":",
"AddPane1",
")",
".",
"The",
"second",
"version",
"is",
"used",
"for",
"simpler",
"user",
"interfaces",
"which",
"do",
"not",
"require",
"as",
"much",
"configuration",
"(",
":",
"meth",
":",
"AddPane2",
")",
".",
"The",
":",
"meth",
":",
"AddPane3",
"version",
"allows",
"a",
"drop",
"position",
"to",
"be",
"specified",
"which",
"will",
"determine",
"where",
"the",
"pane",
"will",
"be",
"added",
".",
"The",
":",
"meth",
":",
"AddPane4",
"version",
"allows",
"to",
"turn",
"the",
"target",
":",
"class",
":",
"AuiPaneInfo",
"pane",
"into",
"a",
"notebook",
"and",
"the",
"added",
"pane",
"into",
"a",
"page",
"."
] | def AddPane(self, window, arg1=None, arg2=None, target=None):
"""
Tells the frame manager to start managing a child window. There
are four versions of this function. The first verison allows the full spectrum
of pane parameter possibilities (:meth:`AddPane1`). The second version is used for
simpler user interfaces which do not require as much configuration (:meth:`AddPane2`).
The :meth:`AddPane3` version allows a drop position to be specified, which will determine
where the pane will be added. The :meth:`AddPane4` version allows to turn the target
:class:`AuiPaneInfo` pane into a notebook and the added pane into a page.
In your code, simply call :meth:`AddPane`.
:param Window `window`: the child window to manage;
:param `arg1`: a :class:`AuiPaneInfo` or an integer value (direction);
:param `arg2`: a :class:`AuiPaneInfo` or a :class:`Point` (drop position);
:param `target`: a :class:`AuiPaneInfo` to be turned into a notebook
and new pane added to it as a page. (additionally, target can be any pane in
an existing notebook)
"""
if target in self._panes:
return self.AddPane4(window, arg1, target)
if type(arg1) == type(1):
# This Is Addpane2
if arg1 is None:
arg1 = wx.LEFT
if arg2 is None:
arg2 = ""
return self.AddPane2(window, arg1, arg2)
else:
if isinstance(arg2, wx.Point):
return self.AddPane3(window, arg1, arg2)
else:
return self.AddPane1(window, arg1) | [
"def",
"AddPane",
"(",
"self",
",",
"window",
",",
"arg1",
"=",
"None",
",",
"arg2",
"=",
"None",
",",
"target",
"=",
"None",
")",
":",
"if",
"target",
"in",
"self",
".",
"_panes",
":",
"return",
"self",
".",
"AddPane4",
"(",
"window",
",",
"arg1",
",",
"target",
")",
"if",
"type",
"(",
"arg1",
")",
"==",
"type",
"(",
"1",
")",
":",
"# This Is Addpane2",
"if",
"arg1",
"is",
"None",
":",
"arg1",
"=",
"wx",
".",
"LEFT",
"if",
"arg2",
"is",
"None",
":",
"arg2",
"=",
"\"\"",
"return",
"self",
".",
"AddPane2",
"(",
"window",
",",
"arg1",
",",
"arg2",
")",
"else",
":",
"if",
"isinstance",
"(",
"arg2",
",",
"wx",
".",
"Point",
")",
":",
"return",
"self",
".",
"AddPane3",
"(",
"window",
",",
"arg1",
",",
"arg2",
")",
"else",
":",
"return",
"self",
".",
"AddPane1",
"(",
"window",
",",
"arg1",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/framemanager.py#L4683-L4717 | ||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/learn/python/learn/ops/autoencoder_ops.py | python | dnn_autoencoder | (
tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
scope=None) | Creates fully connected autoencoder subgraph.
Args:
tensor_in: tensor or placeholder for input features.
hidden_units: list of counts of hidden units in each layer.
activation: activation function used to map inner latent layer onto
reconstruction layer.
add_noise: a function that adds noise to tensor_in,
e.g. def add_noise(x):
return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
dropout: if not None, will add a dropout layer with given
probability.
scope: the variable scope for this op.
Returns:
Tensors for encoder and decoder. | Creates fully connected autoencoder subgraph. | [
"Creates",
"fully",
"connected",
"autoencoder",
"subgraph",
"."
] | def dnn_autoencoder(
tensor_in, hidden_units, activation=nn.relu, add_noise=None, dropout=None,
scope=None):
"""Creates fully connected autoencoder subgraph.
Args:
tensor_in: tensor or placeholder for input features.
hidden_units: list of counts of hidden units in each layer.
activation: activation function used to map inner latent layer onto
reconstruction layer.
add_noise: a function that adds noise to tensor_in,
e.g. def add_noise(x):
return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
dropout: if not None, will add a dropout layer with given
probability.
scope: the variable scope for this op.
Returns:
Tensors for encoder and decoder.
"""
with vs.variable_op_scope([tensor_in], scope, "autoencoder"):
if add_noise is not None:
tensor_in = add_noise(tensor_in)
with vs.variable_scope("encoder"):
# build DNN encoder
encoder = dnn_ops.dnn(
tensor_in, hidden_units, activation=activation, dropout=dropout)
with vs.variable_scope("decoder"):
# reverse hidden_units and built DNN decoder
decoder = dnn_ops.dnn(
encoder, hidden_units[::-1], activation=activation, dropout=dropout)
return encoder, decoder | [
"def",
"dnn_autoencoder",
"(",
"tensor_in",
",",
"hidden_units",
",",
"activation",
"=",
"nn",
".",
"relu",
",",
"add_noise",
"=",
"None",
",",
"dropout",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"with",
"vs",
".",
"variable_op_scope",
"(",
"[",
"tensor_in",
"]",
",",
"scope",
",",
"\"autoencoder\"",
")",
":",
"if",
"add_noise",
"is",
"not",
"None",
":",
"tensor_in",
"=",
"add_noise",
"(",
"tensor_in",
")",
"with",
"vs",
".",
"variable_scope",
"(",
"\"encoder\"",
")",
":",
"# build DNN encoder",
"encoder",
"=",
"dnn_ops",
".",
"dnn",
"(",
"tensor_in",
",",
"hidden_units",
",",
"activation",
"=",
"activation",
",",
"dropout",
"=",
"dropout",
")",
"with",
"vs",
".",
"variable_scope",
"(",
"\"decoder\"",
")",
":",
"# reverse hidden_units and built DNN decoder",
"decoder",
"=",
"dnn_ops",
".",
"dnn",
"(",
"encoder",
",",
"hidden_units",
"[",
":",
":",
"-",
"1",
"]",
",",
"activation",
"=",
"activation",
",",
"dropout",
"=",
"dropout",
")",
"return",
"encoder",
",",
"decoder"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/learn/python/learn/ops/autoencoder_ops.py#L27-L58 | ||
alibaba/MNN | c4d9566171d589c3ded23aa18ffb197016995a12 | pymnn/pip_package/MNN/expr/__init__.py | python | relu6 | (x, min=0.0, max=6.0) | return _F.relu6(x, min, max) | relu6(x, min=0.0, max=6.0)
`max(min(x, max), min)` of `x`.
Parameters
----------
x : var_like, input value.
min : float, input value. Default is 0.0;
max : float, input value. Default is 6.0;
Returns
-------
relu6_res : Var.
Example:
-------
>>> expr.relu6([-1.0, 7.0, 2.0])
var[0., 6., 2.], dtype=float32) | relu6(x, min=0.0, max=6.0)
`max(min(x, max), min)` of `x`. | [
"relu6",
"(",
"x",
"min",
"=",
"0",
".",
"0",
"max",
"=",
"6",
".",
"0",
")",
"max",
"(",
"min",
"(",
"x",
"max",
")",
"min",
")",
"of",
"x",
"."
] | def relu6(x, min=0.0, max=6.0):
'''
relu6(x, min=0.0, max=6.0)
`max(min(x, max), min)` of `x`.
Parameters
----------
x : var_like, input value.
min : float, input value. Default is 0.0;
max : float, input value. Default is 6.0;
Returns
-------
relu6_res : Var.
Example:
-------
>>> expr.relu6([-1.0, 7.0, 2.0])
var[0., 6., 2.], dtype=float32)
'''
x = _to_var(x)
min = _to_float(min)
max = _to_float(max)
return _F.relu6(x, min, max) | [
"def",
"relu6",
"(",
"x",
",",
"min",
"=",
"0.0",
",",
"max",
"=",
"6.0",
")",
":",
"x",
"=",
"_to_var",
"(",
"x",
")",
"min",
"=",
"_to_float",
"(",
"min",
")",
"max",
"=",
"_to_float",
"(",
"max",
")",
"return",
"_F",
".",
"relu6",
"(",
"x",
",",
"min",
",",
"max",
")"
] | https://github.com/alibaba/MNN/blob/c4d9566171d589c3ded23aa18ffb197016995a12/pymnn/pip_package/MNN/expr/__init__.py#L1918-L1941 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/ragged/ragged_math_ops.py | python | ragged_reduce_aggregate | (reduce_op,
unsorted_segment_op,
rt_input,
axis,
keepdims,
separator=None,
name=None) | Aggregates across axes of a RaggedTensor using the given `Tensor` ops.
Reduces `rt_input` along the dimensions given in `axis`. The rank of the
tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified,
then all dimensions are reduced, and a scalar value is returned.
This op assumes that `reduce_op` and `unsorted_segment_op` are associative;
if not, then reducing multiple axes will return incorrect results. (In
particular, reducing multiple axes is currently implemented by reducing the
axes one at a time.)
Args:
reduce_op: The tensorflow `op` that should be used to reduce values in
uniform dimensions. Must have the same signature and basic behavior as
`reduce_sum`, `reduce_max`, etc.
unsorted_segment_op: The tensorflow `op` that should be used to combine
values in ragged dimensions. Must have the same signature and basic
behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.
rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.
axis: The axis or axes to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a
given set of axes), or a `Tensor` with a constant value. Must be in the
range `[0, rt_input.rank)`.
keepdims: If true, retains reduced dimensions with length 1.
separator: An optional string. Defaults to None. The separator to use when
joining. The separator must not be set for non-string data types. (i.e.
if separator is not None then it uses string ops)
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the reduced values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank`
of the returned tensor is given by substracting any ragged dimensions
specified in `axis` from `rt_input.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant. | Aggregates across axes of a RaggedTensor using the given `Tensor` ops. | [
"Aggregates",
"across",
"axes",
"of",
"a",
"RaggedTensor",
"using",
"the",
"given",
"Tensor",
"ops",
"."
] | def ragged_reduce_aggregate(reduce_op,
unsorted_segment_op,
rt_input,
axis,
keepdims,
separator=None,
name=None):
"""Aggregates across axes of a RaggedTensor using the given `Tensor` ops.
Reduces `rt_input` along the dimensions given in `axis`. The rank of the
tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified,
then all dimensions are reduced, and a scalar value is returned.
This op assumes that `reduce_op` and `unsorted_segment_op` are associative;
if not, then reducing multiple axes will return incorrect results. (In
particular, reducing multiple axes is currently implemented by reducing the
axes one at a time.)
Args:
reduce_op: The tensorflow `op` that should be used to reduce values in
uniform dimensions. Must have the same signature and basic behavior as
`reduce_sum`, `reduce_max`, etc.
unsorted_segment_op: The tensorflow `op` that should be used to combine
values in ragged dimensions. Must have the same signature and basic
behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.
rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.
axis: The axis or axes to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a
given set of axes), or a `Tensor` with a constant value. Must be in the
range `[0, rt_input.rank)`.
keepdims: If true, retains reduced dimensions with length 1.
separator: An optional string. Defaults to None. The separator to use when
joining. The separator must not be set for non-string data types. (i.e.
if separator is not None then it uses string ops)
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the reduced values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank`
of the returned tensor is given by substracting any ragged dimensions
specified in `axis` from `rt_input.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
"""
if not ragged_tensor.is_ragged(rt_input):
if separator is None:
return reduce_op(rt_input, axis, name=name)
else:
# When separator is not None, We infer that dtype is string and
# reduce_join will be called.
return reduce_op(rt_input, axis, name=name, separator=separator)
if keepdims:
raise ValueError('keepdims=True is not supported for RaggedTensors.')
if isinstance(axis, ops.Tensor):
axis = tensor_util.constant_value(axis)
if axis is None:
raise ValueError('axis must be known at graph construction time.')
if isinstance(axis, np.ndarray):
axis = axis.tolist()
# When reducing all axes, just ignore splits & reduce the inner values.
if axis is None:
return reduce_op(rt_input.flat_values, None, name=name)
with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]):
if isinstance(axis, (tuple, list)):
if not axis:
return rt_input
elif len(axis) == 1:
axis = axis[0]
else:
# When reducing multiple axes, as we reduce one at a time (see below),
# the negative axis has to be converted to positive at the first run
# as the sort with negative axis will have different orders.
# See GitHub issue 27497.
axis = [
ragged_util.get_positive_axis(a, rt_input.shape.ndims) for a in axis
]
# When reducing multiple axes, just reduce one at a time. This is less
# efficient, and only works for associative ops. (In particular, it
# does not work for reduce_mean.) However, reducing multiple axes at
# once will probably require a nontrivial c++ op.
axis = sorted(axis)
inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
rt_input, axis[-1], keepdims,
separator)
return ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
inner_reduced, axis[:-1], keepdims,
separator)
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
rt_input, name='rt_input')
axis = ragged_util.get_positive_axis(axis, rt_input.shape.ndims)
if axis == 0:
# out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]
row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1]
num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0)
segment_ids = range(row_lengths).values
return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
segment_ids, num_segments, separator)
elif axis == 1:
# out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]
num_segments = array_ops.shape(rt_input.row_splits)[0] - 1
segment_ids = segment_id_ops.row_splits_to_segment_ids(
rt_input.row_splits)
return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
segment_ids, num_segments, separator)
else:
# out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =
# sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]
return rt_input.with_values(
ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
rt_input.values, axis - 1, keepdims,
separator)) | [
"def",
"ragged_reduce_aggregate",
"(",
"reduce_op",
",",
"unsorted_segment_op",
",",
"rt_input",
",",
"axis",
",",
"keepdims",
",",
"separator",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"ragged_tensor",
".",
"is_ragged",
"(",
"rt_input",
")",
":",
"if",
"separator",
"is",
"None",
":",
"return",
"reduce_op",
"(",
"rt_input",
",",
"axis",
",",
"name",
"=",
"name",
")",
"else",
":",
"# When separator is not None, We infer that dtype is string and",
"# reduce_join will be called.",
"return",
"reduce_op",
"(",
"rt_input",
",",
"axis",
",",
"name",
"=",
"name",
",",
"separator",
"=",
"separator",
")",
"if",
"keepdims",
":",
"raise",
"ValueError",
"(",
"'keepdims=True is not supported for RaggedTensors.'",
")",
"if",
"isinstance",
"(",
"axis",
",",
"ops",
".",
"Tensor",
")",
":",
"axis",
"=",
"tensor_util",
".",
"constant_value",
"(",
"axis",
")",
"if",
"axis",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'axis must be known at graph construction time.'",
")",
"if",
"isinstance",
"(",
"axis",
",",
"np",
".",
"ndarray",
")",
":",
"axis",
"=",
"axis",
".",
"tolist",
"(",
")",
"# When reducing all axes, just ignore splits & reduce the inner values.",
"if",
"axis",
"is",
"None",
":",
"return",
"reduce_op",
"(",
"rt_input",
".",
"flat_values",
",",
"None",
",",
"name",
"=",
"name",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"'RaggedReduce'",
",",
"[",
"rt_input",
",",
"axis",
"]",
")",
":",
"if",
"isinstance",
"(",
"axis",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"if",
"not",
"axis",
":",
"return",
"rt_input",
"elif",
"len",
"(",
"axis",
")",
"==",
"1",
":",
"axis",
"=",
"axis",
"[",
"0",
"]",
"else",
":",
"# When reducing multiple axes, as we reduce one at a time (see below),",
"# the negative axis has to be converted to positive at the first run",
"# as the sort with negative axis will have different orders.",
"# See GitHub issue 27497.",
"axis",
"=",
"[",
"ragged_util",
".",
"get_positive_axis",
"(",
"a",
",",
"rt_input",
".",
"shape",
".",
"ndims",
")",
"for",
"a",
"in",
"axis",
"]",
"# When reducing multiple axes, just reduce one at a time. This is less",
"# efficient, and only works for associative ops. (In particular, it",
"# does not work for reduce_mean.) However, reducing multiple axes at",
"# once will probably require a nontrivial c++ op.",
"axis",
"=",
"sorted",
"(",
"axis",
")",
"inner_reduced",
"=",
"ragged_reduce_aggregate",
"(",
"reduce_op",
",",
"unsorted_segment_op",
",",
"rt_input",
",",
"axis",
"[",
"-",
"1",
"]",
",",
"keepdims",
",",
"separator",
")",
"return",
"ragged_reduce_aggregate",
"(",
"reduce_op",
",",
"unsorted_segment_op",
",",
"inner_reduced",
",",
"axis",
"[",
":",
"-",
"1",
"]",
",",
"keepdims",
",",
"separator",
")",
"rt_input",
"=",
"ragged_tensor",
".",
"convert_to_tensor_or_ragged_tensor",
"(",
"rt_input",
",",
"name",
"=",
"'rt_input'",
")",
"axis",
"=",
"ragged_util",
".",
"get_positive_axis",
"(",
"axis",
",",
"rt_input",
".",
"shape",
".",
"ndims",
")",
"if",
"axis",
"==",
"0",
":",
"# out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]",
"row_lengths",
"=",
"rt_input",
".",
"row_splits",
"[",
"1",
":",
"]",
"-",
"rt_input",
".",
"row_splits",
"[",
":",
"-",
"1",
"]",
"num_segments",
"=",
"math_ops",
".",
"maximum",
"(",
"math_ops",
".",
"reduce_max",
"(",
"row_lengths",
")",
",",
"0",
")",
"segment_ids",
"=",
"range",
"(",
"row_lengths",
")",
".",
"values",
"return",
"_ragged_segment_aggregate",
"(",
"unsorted_segment_op",
",",
"rt_input",
".",
"values",
",",
"segment_ids",
",",
"num_segments",
",",
"separator",
")",
"elif",
"axis",
"==",
"1",
":",
"# out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]",
"num_segments",
"=",
"array_ops",
".",
"shape",
"(",
"rt_input",
".",
"row_splits",
")",
"[",
"0",
"]",
"-",
"1",
"segment_ids",
"=",
"segment_id_ops",
".",
"row_splits_to_segment_ids",
"(",
"rt_input",
".",
"row_splits",
")",
"return",
"_ragged_segment_aggregate",
"(",
"unsorted_segment_op",
",",
"rt_input",
".",
"values",
",",
"segment_ids",
",",
"num_segments",
",",
"separator",
")",
"else",
":",
"# out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =",
"# sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]",
"return",
"rt_input",
".",
"with_values",
"(",
"ragged_reduce_aggregate",
"(",
"reduce_op",
",",
"unsorted_segment_op",
",",
"rt_input",
".",
"values",
",",
"axis",
"-",
"1",
",",
"keepdims",
",",
"separator",
")",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/ragged/ragged_math_ops.py#L427-L545 | ||
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/chigger/observers/KeyObserver.py | python | KeyObserver.addObserver | (self, event, vtkinteractor) | return vtkinteractor.AddObserver(event, self._callback) | Add the KeyPressEvent for this object. | Add the KeyPressEvent for this object. | [
"Add",
"the",
"KeyPressEvent",
"for",
"this",
"object",
"."
] | def addObserver(self, event, vtkinteractor):
"""
Add the KeyPressEvent for this object.
"""
return vtkinteractor.AddObserver(event, self._callback) | [
"def",
"addObserver",
"(",
"self",
",",
"event",
",",
"vtkinteractor",
")",
":",
"return",
"vtkinteractor",
".",
"AddObserver",
"(",
"event",
",",
"self",
".",
"_callback",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/chigger/observers/KeyObserver.py#L26-L30 | |
lmb-freiburg/flownet2 | b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc | scripts/cpp_lint.py | python | FileInfo.NoExtension | (self) | return '/'.join(self.Split()[0:2]) | File has no source file extension. | File has no source file extension. | [
"File",
"has",
"no",
"source",
"file",
"extension",
"."
] | def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2]) | [
"def",
"NoExtension",
"(",
"self",
")",
":",
"return",
"'/'",
".",
"join",
"(",
"self",
".",
"Split",
"(",
")",
"[",
"0",
":",
"2",
"]",
")"
] | https://github.com/lmb-freiburg/flownet2/blob/b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc/scripts/cpp_lint.py#L952-L954 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/distutils/ccompiler.py | python | CCompiler.undefine_macro | (self, name) | Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence. | Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence. | [
"Undefine",
"a",
"preprocessor",
"macro",
"for",
"all",
"compilations",
"driven",
"by",
"this",
"compiler",
"object",
".",
"If",
"the",
"same",
"macro",
"is",
"defined",
"by",
"define_macro",
"()",
"and",
"undefined",
"by",
"undefine_macro",
"()",
"the",
"last",
"call",
"takes",
"precedence",
"(",
"including",
"multiple",
"redefinitions",
"or",
"undefinitions",
")",
".",
"If",
"the",
"macro",
"is",
"redefined",
"/",
"undefined",
"on",
"a",
"per",
"-",
"compilation",
"basis",
"(",
"ie",
".",
"in",
"the",
"call",
"to",
"compile",
"()",
")",
"then",
"that",
"takes",
"precedence",
"."
] | def undefine_macro(self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append(undefn) | [
"def",
"undefine_macro",
"(",
"self",
",",
"name",
")",
":",
"# Delete from the list of macro definitions/undefinitions if",
"# already there (so that this one will take precedence).",
"i",
"=",
"self",
".",
"_find_macro",
"(",
"name",
")",
"if",
"i",
"is",
"not",
"None",
":",
"del",
"self",
".",
"macros",
"[",
"i",
"]",
"undefn",
"=",
"(",
"name",
",",
")",
"self",
".",
"macros",
".",
"append",
"(",
"undefn",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/distutils/ccompiler.py#L199-L215 | ||
GeometryCollective/boundary-first-flattening | 8250e5a0e85980ec50b5e8aa8f49dd6519f915cd | deps/nanogui/ext/pybind11/tools/clang/cindex.py | python | register_functions | (lib, ignore_errors) | Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library. | Register function prototypes with a libclang library instance. | [
"Register",
"function",
"prototypes",
"with",
"a",
"libclang",
"library",
"instance",
"."
] | def register_functions(lib, ignore_errors):
"""Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library.
"""
def register(item):
return register_function(lib, item, ignore_errors)
for f in functionList:
register(f) | [
"def",
"register_functions",
"(",
"lib",
",",
"ignore_errors",
")",
":",
"def",
"register",
"(",
"item",
")",
":",
"return",
"register_function",
"(",
"lib",
",",
"item",
",",
"ignore_errors",
")",
"for",
"f",
"in",
"functionList",
":",
"register",
"(",
"f",
")"
] | https://github.com/GeometryCollective/boundary-first-flattening/blob/8250e5a0e85980ec50b5e8aa8f49dd6519f915cd/deps/nanogui/ext/pybind11/tools/clang/cindex.py#L3618-L3629 | ||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/ops/math_ops.py | python | real | (input, name=None) | Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`,
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`. | Returns the real part of a complex number. | [
"Returns",
"the",
"real",
"part",
"of",
"a",
"complex",
"number",
"."
] | def real(input, name=None):
"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`,
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.op_scope([input], name, "Real") as name:
return gen_math_ops.real(input, Tout=input.dtype.real_dtype, name=name) | [
"def",
"real",
"(",
"input",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"input",
"]",
",",
"name",
",",
"\"Real\"",
")",
"as",
"name",
":",
"return",
"gen_math_ops",
".",
"real",
"(",
"input",
",",
"Tout",
"=",
"input",
".",
"dtype",
".",
"real_dtype",
",",
"name",
"=",
"name",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/math_ops.py#L508-L533 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/fftpack/pseudo_diffs.py | python | itilbert | (x,h,period=None, _cache=_cache) | return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) | Return inverse h-Tilbert transform of a periodic sequence x.
If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
y_0 = 0
For more details, see `tilbert`. | Return inverse h-Tilbert transform of a periodic sequence x. | [
"Return",
"inverse",
"h",
"-",
"Tilbert",
"transform",
"of",
"a",
"periodic",
"sequence",
"x",
"."
] | def itilbert(x,h,period=None, _cache=_cache):
"""
Return inverse h-Tilbert transform of a periodic sequence x.
If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x
and y, respectively, then::
y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j
y_0 = 0
For more details, see `tilbert`.
"""
tmp = asarray(x)
if iscomplexobj(tmp):
return itilbert(tmp.real,h,period) + \
1j*itilbert(tmp.imag,h,period)
if period is not None:
h = h*2*pi/period
n = len(x)
omega = _cache.get((n,h))
if omega is None:
if len(_cache) > 20:
while _cache:
_cache.popitem()
def kernel(k,h=h):
if k:
return -tanh(h*k)
return 0
omega = convolve.init_convolution_kernel(n,kernel,d=1)
_cache[(n,h)] = omega
overwrite_x = _datacopied(tmp, x)
return convolve.convolve(tmp,omega,swap_real_imag=1,overwrite_x=overwrite_x) | [
"def",
"itilbert",
"(",
"x",
",",
"h",
",",
"period",
"=",
"None",
",",
"_cache",
"=",
"_cache",
")",
":",
"tmp",
"=",
"asarray",
"(",
"x",
")",
"if",
"iscomplexobj",
"(",
"tmp",
")",
":",
"return",
"itilbert",
"(",
"tmp",
".",
"real",
",",
"h",
",",
"period",
")",
"+",
"1j",
"*",
"itilbert",
"(",
"tmp",
".",
"imag",
",",
"h",
",",
"period",
")",
"if",
"period",
"is",
"not",
"None",
":",
"h",
"=",
"h",
"*",
"2",
"*",
"pi",
"/",
"period",
"n",
"=",
"len",
"(",
"x",
")",
"omega",
"=",
"_cache",
".",
"get",
"(",
"(",
"n",
",",
"h",
")",
")",
"if",
"omega",
"is",
"None",
":",
"if",
"len",
"(",
"_cache",
")",
">",
"20",
":",
"while",
"_cache",
":",
"_cache",
".",
"popitem",
"(",
")",
"def",
"kernel",
"(",
"k",
",",
"h",
"=",
"h",
")",
":",
"if",
"k",
":",
"return",
"-",
"tanh",
"(",
"h",
"*",
"k",
")",
"return",
"0",
"omega",
"=",
"convolve",
".",
"init_convolution_kernel",
"(",
"n",
",",
"kernel",
",",
"d",
"=",
"1",
")",
"_cache",
"[",
"(",
"n",
",",
"h",
")",
"]",
"=",
"omega",
"overwrite_x",
"=",
"_datacopied",
"(",
"tmp",
",",
"x",
")",
"return",
"convolve",
".",
"convolve",
"(",
"tmp",
",",
"omega",
",",
"swap_real_imag",
"=",
"1",
",",
"overwrite_x",
"=",
"overwrite_x",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/fftpack/pseudo_diffs.py#L159-L192 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/utils/model_utils.py | python | _adjust_vitis_sigmoid | (model, quantize_info) | return adjusted_quantize_info | Adjust quantize info of VitisSigmoid layers.
DPU compiler constraints for VitisSigmoid:
1. input pos of VitisSigmoid >= 0
2. output pos of VitisSigmoid >= 7 | Adjust quantize info of VitisSigmoid layers. | [
"Adjust",
"quantize",
"info",
"of",
"VitisSigmoid",
"layers",
"."
] | def _adjust_vitis_sigmoid(model, quantize_info):
"""Adjust quantize info of VitisSigmoid layers.
DPU compiler constraints for VitisSigmoid:
1. input pos of VitisSigmoid >= 0
2. output pos of VitisSigmoid >= 7
"""
adjusted_quantize_info = copy.deepcopy(quantize_info)
for i in range(1, len(model.layers)):
layer = model.layers[i]
if isinstance(layer, vitis_quantize_wrapper.QuantizeWrapper) and isinstance(
layer.layer, vitis_activation.VitisSigmoid):
pre_layer = layer.inbound_nodes[0].inbound_layers
ipos = _get_pos(pre_layer, adjusted_quantize_info, 'o')
if ipos < 0:
_set_pos(pre_layer, adjusted_quantize_info, 'o', 0)
logger.debug(
'Input quantize pos of VitisSimoid layer {} is {}, modify it to 0 '
'to meet the DPU constraints.'.format(layer.name, int(ipos)))
opos = _get_pos(layer, adjusted_quantize_info, 'o')
if opos < 7.0:
_set_pos(layer, adjusted_quantize_info, 'o', 7.0)
logger.debug(
'Output quantize pos of VitisSimoid layer {} is {}, modify it to 7 '
'to meet the DPU constraints.'.format(layer.name, int(opos)))
return adjusted_quantize_info | [
"def",
"_adjust_vitis_sigmoid",
"(",
"model",
",",
"quantize_info",
")",
":",
"adjusted_quantize_info",
"=",
"copy",
".",
"deepcopy",
"(",
"quantize_info",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"model",
".",
"layers",
")",
")",
":",
"layer",
"=",
"model",
".",
"layers",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"layer",
",",
"vitis_quantize_wrapper",
".",
"QuantizeWrapper",
")",
"and",
"isinstance",
"(",
"layer",
".",
"layer",
",",
"vitis_activation",
".",
"VitisSigmoid",
")",
":",
"pre_layer",
"=",
"layer",
".",
"inbound_nodes",
"[",
"0",
"]",
".",
"inbound_layers",
"ipos",
"=",
"_get_pos",
"(",
"pre_layer",
",",
"adjusted_quantize_info",
",",
"'o'",
")",
"if",
"ipos",
"<",
"0",
":",
"_set_pos",
"(",
"pre_layer",
",",
"adjusted_quantize_info",
",",
"'o'",
",",
"0",
")",
"logger",
".",
"debug",
"(",
"'Input quantize pos of VitisSimoid layer {} is {}, modify it to 0 '",
"'to meet the DPU constraints.'",
".",
"format",
"(",
"layer",
".",
"name",
",",
"int",
"(",
"ipos",
")",
")",
")",
"opos",
"=",
"_get_pos",
"(",
"layer",
",",
"adjusted_quantize_info",
",",
"'o'",
")",
"if",
"opos",
"<",
"7.0",
":",
"_set_pos",
"(",
"layer",
",",
"adjusted_quantize_info",
",",
"'o'",
",",
"7.0",
")",
"logger",
".",
"debug",
"(",
"'Output quantize pos of VitisSimoid layer {} is {}, modify it to 7 '",
"'to meet the DPU constraints.'",
".",
"format",
"(",
"layer",
".",
"name",
",",
"int",
"(",
"opos",
")",
")",
")",
"return",
"adjusted_quantize_info"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/quantization/keras/vitis/utils/model_utils.py#L575-L603 | |
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/json_schema_compiler/model.py | python | _GetTypes | (parent, json, namespace, origin) | return types | Creates Type objects extracted from |json|. | Creates Type objects extracted from |json|. | [
"Creates",
"Type",
"objects",
"extracted",
"from",
"|json|",
"."
] | def _GetTypes(parent, json, namespace, origin):
"""Creates Type objects extracted from |json|.
"""
types = OrderedDict()
for type_json in json.get('types', []):
type_ = Type(parent, type_json['id'], type_json, namespace, origin)
types[type_.name] = type_
return types | [
"def",
"_GetTypes",
"(",
"parent",
",",
"json",
",",
"namespace",
",",
"origin",
")",
":",
"types",
"=",
"OrderedDict",
"(",
")",
"for",
"type_json",
"in",
"json",
".",
"get",
"(",
"'types'",
",",
"[",
"]",
")",
":",
"type_",
"=",
"Type",
"(",
"parent",
",",
"type_json",
"[",
"'id'",
"]",
",",
"type_json",
",",
"namespace",
",",
"origin",
")",
"types",
"[",
"type_",
".",
"name",
"]",
"=",
"type_",
"return",
"types"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/json_schema_compiler/model.py#L543-L550 | |
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | Framework/PythonInterface/plugins/algorithms/LRAutoReduction.py | python | LRAutoReduction._save_partial_output | (self, data_set, first_run_of_set, sequence_number, run_number) | return file_path | Stitch and save the full reflectivity curve, or as much as we have at the moment.
@param data_set: DataSets object
@param run_number: run number according to the data file name
@param first_run_of_set: first run in the sequence (sequence ID)
@param sequence_number: the ID of the data set within the sequence of runs | Stitch and save the full reflectivity curve, or as much as we have at the moment. | [
"Stitch",
"and",
"save",
"the",
"full",
"reflectivity",
"curve",
"or",
"as",
"much",
"as",
"we",
"have",
"at",
"the",
"moment",
"."
] | def _save_partial_output(self, data_set, first_run_of_set, sequence_number, run_number):
"""
Stitch and save the full reflectivity curve, or as much as we have at the moment.
@param data_set: DataSets object
@param run_number: run number according to the data file name
@param first_run_of_set: first run in the sequence (sequence ID)
@param sequence_number: the ID of the data set within the sequence of runs
"""
output_dir = self.getProperty("OutputDirectory").value
output_file = self.getProperty("OutputFilename").value
if len(output_file.strip()) == 0:
output_file = "REFL_%s_%s_%s_auto.nxs" % (first_run_of_set, sequence_number, run_number)
# Save partial output
n_ts = 0
output_ws = None
prefix = 'reflectivity_%s_%s_%s' % (first_run_of_set, sequence_number, run_number)
for ws in AnalysisDataService.getObjectNames():
if ws.endswith("ts") and ws.startswith(prefix):
output_ws = ws
n_ts += 1
if n_ts > 1:
logger.error("More than one reduced output for %s" % prefix)
file_path = os.path.join(output_dir, output_file)
SaveNexus(Filename=file_path, InputWorkspace=output_ws)
# Put the reflectivity curve together
for f in os.listdir(output_dir):
if f.startswith("REFL_%s" % first_run_of_set) and f.endswith("auto.nxs"):
ws_name = f.replace("_auto.nxs", "")
ws_name = ws_name.replace("REFL_", "")
LoadNexus(Filename=os.path.join(output_dir, f), OutputWorkspace="reflectivity_%s_auto_ts" % ws_name)
ws_list = AnalysisDataService.getObjectNames()
input_ws_list = []
for ws in ws_list:
if ws.endswith("auto_ts"):
input_ws_list.append(ws)
if len(input_ws_list) == 0:
logger.notice("No data sets to stitch.")
return
input_ws_list = sorted(input_ws_list)
default_file_name = 'REFL_%s_combined_data_auto.txt' % first_run_of_set
file_path = os.path.join(output_dir, default_file_name)
scale_to_unity = self.getProperty("ScaleToUnity").value
wl_cutoff = self.getProperty("ScalingWavelengthCutoff").value
# The following were the values used in the auto-reduction before 2016
# output_binning = [0.005, -0.01, 2.0]
output_binning = [data_set.q_min, -abs(data_set.q_step), 2.0]
dQ_constant = data_set.fourth_column_dq0
dQ_slope = data_set.fourth_column_dq_over_q
compute_resolution = self.getProperty("ComputeResolution").value
LRReflectivityOutput(ReducedWorkspaces=input_ws_list, ScaleToUnity=scale_to_unity,
ScalingWavelengthCutoff=wl_cutoff, OutputBinning=output_binning,
DQConstant=dQ_constant, DQSlope=dQ_slope,
ComputeDQ=compute_resolution, OutputFilename=file_path)
for ws in input_ws_list:
AnalysisDataService.remove(str(ws))
return file_path | [
"def",
"_save_partial_output",
"(",
"self",
",",
"data_set",
",",
"first_run_of_set",
",",
"sequence_number",
",",
"run_number",
")",
":",
"output_dir",
"=",
"self",
".",
"getProperty",
"(",
"\"OutputDirectory\"",
")",
".",
"value",
"output_file",
"=",
"self",
".",
"getProperty",
"(",
"\"OutputFilename\"",
")",
".",
"value",
"if",
"len",
"(",
"output_file",
".",
"strip",
"(",
")",
")",
"==",
"0",
":",
"output_file",
"=",
"\"REFL_%s_%s_%s_auto.nxs\"",
"%",
"(",
"first_run_of_set",
",",
"sequence_number",
",",
"run_number",
")",
"# Save partial output",
"n_ts",
"=",
"0",
"output_ws",
"=",
"None",
"prefix",
"=",
"'reflectivity_%s_%s_%s'",
"%",
"(",
"first_run_of_set",
",",
"sequence_number",
",",
"run_number",
")",
"for",
"ws",
"in",
"AnalysisDataService",
".",
"getObjectNames",
"(",
")",
":",
"if",
"ws",
".",
"endswith",
"(",
"\"ts\"",
")",
"and",
"ws",
".",
"startswith",
"(",
"prefix",
")",
":",
"output_ws",
"=",
"ws",
"n_ts",
"+=",
"1",
"if",
"n_ts",
">",
"1",
":",
"logger",
".",
"error",
"(",
"\"More than one reduced output for %s\"",
"%",
"prefix",
")",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"output_file",
")",
"SaveNexus",
"(",
"Filename",
"=",
"file_path",
",",
"InputWorkspace",
"=",
"output_ws",
")",
"# Put the reflectivity curve together",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"output_dir",
")",
":",
"if",
"f",
".",
"startswith",
"(",
"\"REFL_%s\"",
"%",
"first_run_of_set",
")",
"and",
"f",
".",
"endswith",
"(",
"\"auto.nxs\"",
")",
":",
"ws_name",
"=",
"f",
".",
"replace",
"(",
"\"_auto.nxs\"",
",",
"\"\"",
")",
"ws_name",
"=",
"ws_name",
".",
"replace",
"(",
"\"REFL_\"",
",",
"\"\"",
")",
"LoadNexus",
"(",
"Filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"f",
")",
",",
"OutputWorkspace",
"=",
"\"reflectivity_%s_auto_ts\"",
"%",
"ws_name",
")",
"ws_list",
"=",
"AnalysisDataService",
".",
"getObjectNames",
"(",
")",
"input_ws_list",
"=",
"[",
"]",
"for",
"ws",
"in",
"ws_list",
":",
"if",
"ws",
".",
"endswith",
"(",
"\"auto_ts\"",
")",
":",
"input_ws_list",
".",
"append",
"(",
"ws",
")",
"if",
"len",
"(",
"input_ws_list",
")",
"==",
"0",
":",
"logger",
".",
"notice",
"(",
"\"No data sets to stitch.\"",
")",
"return",
"input_ws_list",
"=",
"sorted",
"(",
"input_ws_list",
")",
"default_file_name",
"=",
"'REFL_%s_combined_data_auto.txt'",
"%",
"first_run_of_set",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"default_file_name",
")",
"scale_to_unity",
"=",
"self",
".",
"getProperty",
"(",
"\"ScaleToUnity\"",
")",
".",
"value",
"wl_cutoff",
"=",
"self",
".",
"getProperty",
"(",
"\"ScalingWavelengthCutoff\"",
")",
".",
"value",
"# The following were the values used in the auto-reduction before 2016",
"# output_binning = [0.005, -0.01, 2.0]",
"output_binning",
"=",
"[",
"data_set",
".",
"q_min",
",",
"-",
"abs",
"(",
"data_set",
".",
"q_step",
")",
",",
"2.0",
"]",
"dQ_constant",
"=",
"data_set",
".",
"fourth_column_dq0",
"dQ_slope",
"=",
"data_set",
".",
"fourth_column_dq_over_q",
"compute_resolution",
"=",
"self",
".",
"getProperty",
"(",
"\"ComputeResolution\"",
")",
".",
"value",
"LRReflectivityOutput",
"(",
"ReducedWorkspaces",
"=",
"input_ws_list",
",",
"ScaleToUnity",
"=",
"scale_to_unity",
",",
"ScalingWavelengthCutoff",
"=",
"wl_cutoff",
",",
"OutputBinning",
"=",
"output_binning",
",",
"DQConstant",
"=",
"dQ_constant",
",",
"DQSlope",
"=",
"dQ_slope",
",",
"ComputeDQ",
"=",
"compute_resolution",
",",
"OutputFilename",
"=",
"file_path",
")",
"for",
"ws",
"in",
"input_ws_list",
":",
"AnalysisDataService",
".",
"remove",
"(",
"str",
"(",
"ws",
")",
")",
"return",
"file_path"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/LRAutoReduction.py#L529-L591 | |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/urllib.py | python | urlencode | (query, doseq=0) | return '&'.join(l) | Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input. | Encode a sequence of two-element tuples or dictionary into a URL query string. | [
"Encode",
"a",
"sequence",
"of",
"two",
"-",
"element",
"tuples",
"or",
"dictionary",
"into",
"a",
"URL",
"query",
"string",
"."
] | def urlencode(query, doseq=0):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
If any values in the query arg are sequences and doseq is true, each
sequence element is converted to a separate parameter.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
"""
if hasattr(query,"items"):
# mapping objects
query = query.items()
else:
# it's a bother at times that strings and string-like objects are
# sequences...
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# zero-length sequences of all types will get here and succeed,
# but that's a minor nit - since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty,va,tb = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", tb
l = []
if not doseq:
# preserve old behavior
for k, v in query:
k = quote_plus(str(k))
v = quote_plus(str(v))
l.append(k + '=' + v)
else:
for k, v in query:
k = quote_plus(str(k))
if isinstance(v, str):
v = quote_plus(v)
l.append(k + '=' + v)
elif _is_unicode(v):
# is there a reasonable way to convert to ASCII?
# encode generates a string, but "replace" or "ignore"
# lose information and "strict" can raise UnicodeError
v = quote_plus(v.encode("ASCII","replace"))
l.append(k + '=' + v)
else:
try:
# is this a sufficient test for sequence-ness?
len(v)
except TypeError:
# not a sequence
v = quote_plus(str(v))
l.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
l.append(k + '=' + quote_plus(str(elt)))
return '&'.join(l) | [
"def",
"urlencode",
"(",
"query",
",",
"doseq",
"=",
"0",
")",
":",
"if",
"hasattr",
"(",
"query",
",",
"\"items\"",
")",
":",
"# mapping objects",
"query",
"=",
"query",
".",
"items",
"(",
")",
"else",
":",
"# it's a bother at times that strings and string-like objects are",
"# sequences...",
"try",
":",
"# non-sequence items should not work with len()",
"# non-empty strings will fail this",
"if",
"len",
"(",
"query",
")",
"and",
"not",
"isinstance",
"(",
"query",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"raise",
"TypeError",
"# zero-length sequences of all types will get here and succeed,",
"# but that's a minor nit - since the original implementation",
"# allowed empty dicts that type of behavior probably should be",
"# preserved for consistency",
"except",
"TypeError",
":",
"ty",
",",
"va",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"raise",
"TypeError",
",",
"\"not a valid non-string sequence or mapping object\"",
",",
"tb",
"l",
"=",
"[",
"]",
"if",
"not",
"doseq",
":",
"# preserve old behavior",
"for",
"k",
",",
"v",
"in",
"query",
":",
"k",
"=",
"quote_plus",
"(",
"str",
"(",
"k",
")",
")",
"v",
"=",
"quote_plus",
"(",
"str",
"(",
"v",
")",
")",
"l",
".",
"append",
"(",
"k",
"+",
"'='",
"+",
"v",
")",
"else",
":",
"for",
"k",
",",
"v",
"in",
"query",
":",
"k",
"=",
"quote_plus",
"(",
"str",
"(",
"k",
")",
")",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"v",
"=",
"quote_plus",
"(",
"v",
")",
"l",
".",
"append",
"(",
"k",
"+",
"'='",
"+",
"v",
")",
"elif",
"_is_unicode",
"(",
"v",
")",
":",
"# is there a reasonable way to convert to ASCII?",
"# encode generates a string, but \"replace\" or \"ignore\"",
"# lose information and \"strict\" can raise UnicodeError",
"v",
"=",
"quote_plus",
"(",
"v",
".",
"encode",
"(",
"\"ASCII\"",
",",
"\"replace\"",
")",
")",
"l",
".",
"append",
"(",
"k",
"+",
"'='",
"+",
"v",
")",
"else",
":",
"try",
":",
"# is this a sufficient test for sequence-ness?",
"len",
"(",
"v",
")",
"except",
"TypeError",
":",
"# not a sequence",
"v",
"=",
"quote_plus",
"(",
"str",
"(",
"v",
")",
")",
"l",
".",
"append",
"(",
"k",
"+",
"'='",
"+",
"v",
")",
"else",
":",
"# loop over the sequence",
"for",
"elt",
"in",
"v",
":",
"l",
".",
"append",
"(",
"k",
"+",
"'='",
"+",
"quote_plus",
"(",
"str",
"(",
"elt",
")",
")",
")",
"return",
"'&'",
".",
"join",
"(",
"l",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/urllib.py#L1291-L1352 | |
NVIDIAGameWorks/kaolin | e5148d05e9c1e2ce92a07881ce3593b1c5c3f166 | kaolin/io/usd.py | python | get_authored_time_samples | (file_path) | return sorted(res) | r"""
Returns *all* authored time samples within the USD, aggregated across all primitives.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
Returns:
(list) | r"""
Returns *all* authored time samples within the USD, aggregated across all primitives. | [
"r",
"Returns",
"*",
"all",
"*",
"authored",
"time",
"samples",
"within",
"the",
"USD",
"aggregated",
"across",
"all",
"primitives",
"."
] | def get_authored_time_samples(file_path):
r"""
Returns *all* authored time samples within the USD, aggregated across all primitives.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
Returns:
(list)
"""
stage = Usd.Stage.Open(file_path)
scene_paths = get_scene_paths(file_path)
res = set()
for scene_path in scene_paths:
prim = stage.GetPrimAtPath(scene_path)
attr = prim.GetAttributes()
res.update(set(itertools.chain.from_iterable([x.GetTimeSamples() for x in attr])))
return sorted(res) | [
"def",
"get_authored_time_samples",
"(",
"file_path",
")",
":",
"stage",
"=",
"Usd",
".",
"Stage",
".",
"Open",
"(",
"file_path",
")",
"scene_paths",
"=",
"get_scene_paths",
"(",
"file_path",
")",
"res",
"=",
"set",
"(",
")",
"for",
"scene_path",
"in",
"scene_paths",
":",
"prim",
"=",
"stage",
".",
"GetPrimAtPath",
"(",
"scene_path",
")",
"attr",
"=",
"prim",
".",
"GetAttributes",
"(",
")",
"res",
".",
"update",
"(",
"set",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"[",
"x",
".",
"GetTimeSamples",
"(",
")",
"for",
"x",
"in",
"attr",
"]",
")",
")",
")",
"return",
"sorted",
"(",
"res",
")"
] | https://github.com/NVIDIAGameWorks/kaolin/blob/e5148d05e9c1e2ce92a07881ce3593b1c5c3f166/kaolin/io/usd.py#L338-L355 | |
Tencent/TNN | 7acca99f54c55747b415a4c57677403eebc7b706 | third_party/flatbuffers/python/flatbuffers/builder.py | python | Builder.EndVector | (self) | return self.Offset() | EndVector writes data necessary to finish vector construction. | EndVector writes data necessary to finish vector construction. | [
"EndVector",
"writes",
"data",
"necessary",
"to",
"finish",
"vector",
"construction",
"."
] | def EndVector(self):
"""EndVector writes data necessary to finish vector construction."""
self.assertNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = False
## @endcond
# we already made space for this, so write without PrependUint32
self.PlaceUOffsetT(self.vectorNumElems)
self.vectorNumElems = None
return self.Offset() | [
"def",
"EndVector",
"(",
"self",
")",
":",
"self",
".",
"assertNested",
"(",
")",
"## @cond FLATBUFFERS_INTERNAL",
"self",
".",
"nested",
"=",
"False",
"## @endcond",
"# we already made space for this, so write without PrependUint32",
"self",
".",
"PlaceUOffsetT",
"(",
"self",
".",
"vectorNumElems",
")",
"self",
".",
"vectorNumElems",
"=",
"None",
"return",
"self",
".",
"Offset",
"(",
")"
] | https://github.com/Tencent/TNN/blob/7acca99f54c55747b415a4c57677403eebc7b706/third_party/flatbuffers/python/flatbuffers/builder.py#L380-L390 | |
arangodb/arangodb | 0d658689c7d1b721b314fa3ca27d38303e1570c8 | 3rdParty/V8/gyp/input.py | python | DependencyGraphNode.DeepDependencies | (self, dependencies=None) | return dependencies | Returns an OrderedSet of all of a target's dependencies, recursively. | Returns an OrderedSet of all of a target's dependencies, recursively. | [
"Returns",
"an",
"OrderedSet",
"of",
"all",
"of",
"a",
"target",
"s",
"dependencies",
"recursively",
"."
] | def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies | [
"def",
"DeepDependencies",
"(",
"self",
",",
"dependencies",
"=",
"None",
")",
":",
"if",
"dependencies",
"is",
"None",
":",
"# Using a list to get ordered output and a set to do fast \"is it",
"# already added\" checks.",
"dependencies",
"=",
"OrderedSet",
"(",
")",
"for",
"dependency",
"in",
"self",
".",
"dependencies",
":",
"# Check for None, corresponding to the root node.",
"if",
"dependency",
".",
"ref",
"is",
"None",
":",
"continue",
"if",
"dependency",
".",
"ref",
"not",
"in",
"dependencies",
":",
"dependency",
".",
"DeepDependencies",
"(",
"dependencies",
")",
"dependencies",
".",
"add",
"(",
"dependency",
".",
"ref",
")",
"return",
"dependencies"
] | https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/gyp/input.py#L1435-L1450 | |
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | ci/build.py | python | Cleanup.__call__ | (self) | Perform cleanup | Perform cleanup | [
"Perform",
"cleanup"
] | def __call__(self):
"""Perform cleanup"""
self._cleanup_containers() | [
"def",
"__call__",
"(",
"self",
")",
":",
"self",
".",
"_cleanup_containers",
"(",
")"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/ci/build.py#L84-L86 | ||
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/eager/context.py | python | Context.config | (self) | return config | Return the ConfigProto with all runtime deltas applied. | Return the ConfigProto with all runtime deltas applied. | [
"Return",
"the",
"ConfigProto",
"with",
"all",
"runtime",
"deltas",
"applied",
"."
def config(self):
  """Return the ConfigProto with all runtime deltas applied.

  Builds a fresh ConfigProto from the user-supplied base config (if any)
  and layers every runtime override held on this Context on top of it:
  JIT/optimizer settings, parallelism threads, device placement flags,
  MLIR bridge state, grappler rewriter toggles, per-type device counts,
  GPU options, collective-ops settings and the coordination service.
  """
  # Ensure physical devices have been discovered and config has been imported
  self._initialize_physical_devices()

  # Start from a copy of the caller-provided base config so repeated calls
  # never mutate the stored `self._config`.
  config = config_pb2.ConfigProto()
  if self._config is not None:
    config.CopyFrom(self._config)

  if self._optimizer_jit is not None:
    config.graph_options.optimizer_options.global_jit_level = (
        config_pb2.OptimizerOptions.ON_1
        if self._optimizer_jit else config_pb2.OptimizerOptions.OFF)
  if self._intra_op_parallelism_threads is not None:
    config.intra_op_parallelism_threads = self._intra_op_parallelism_threads
  if self._inter_op_parallelism_threads is not None:
    config.inter_op_parallelism_threads = self._inter_op_parallelism_threads

  if self._soft_device_placement is not None:
    config.allow_soft_placement = self._soft_device_placement
  else:
    # Default: soft placement is on exactly when executing eagerly.
    config.allow_soft_placement = self.executing_eagerly()

  if self._log_device_placement is not None:
    config.log_device_placement = self._log_device_placement

  if self._operation_timeout_in_ms is not None:
    config.operation_timeout_in_ms = self._operation_timeout_in_ms

  # The MLIR bridge rollout state is owned by the C++ runtime; mirror it
  # into the proto and flip the boolean flag when fully enabled.
  is_mlir_bridge_enabled = pywrap_tfe.TF_IsMlirBridgeEnabled()
  config.experimental.mlir_bridge_rollout = is_mlir_bridge_enabled
  if (is_mlir_bridge_enabled ==
      config_pb2.ConfigProto.Experimental.MLIR_BRIDGE_ROLLOUT_ENABLED):
    config.experimental.enable_mlir_bridge = True

  if self._enable_mlir_graph_optimization is not None:
    config.experimental.enable_mlir_graph_optimization = (
        self._enable_mlir_graph_optimization)

  def rewriter_toggle(option):
    # Map a tri-state option (True/False/unset) onto RewriterConfig ON/OFF,
    # leaving the proto field untouched when the option was never set.
    toggle = self._optimizer_experimental_options.get(option, None)
    if toggle is None:
      return

    setattr(config.graph_options.rewrite_options, option,
            (rewriter_config_pb2.RewriterConfig.ON
             if toggle else rewriter_config_pb2.RewriterConfig.OFF))

  def rewriter_bool(option):
    # Same as rewriter_toggle, but for plain boolean rewrite options.
    toggle = self._optimizer_experimental_options.get(option, None)
    if toggle is None:
      return

    setattr(config.graph_options.rewrite_options, option, toggle)

  rewriter_toggle("layout_optimizer")
  rewriter_toggle("constant_folding")
  rewriter_toggle("shape_optimization")
  rewriter_toggle("remapping")
  rewriter_toggle("arithmetic_optimization")
  rewriter_toggle("dependency_optimization")
  rewriter_toggle("loop_optimization")
  rewriter_toggle("function_optimization")
  rewriter_toggle("debug_stripper")
  rewriter_bool("disable_model_pruning")
  rewriter_toggle("scoped_allocator_optimization")
  rewriter_toggle("pin_to_host_optimization")
  rewriter_toggle("implementation_selector")
  rewriter_toggle("auto_mixed_precision")
  rewriter_toggle("use_plugin_optimizers")
  rewriter_bool("disable_meta_optimizer")
  nodes = self._optimizer_experimental_options.get("min_graph_nodes", None)
  if nodes is not None:
    config.graph_options.rewrite_options.min_graph_nodes = nodes

  # Compute device counts: only devices in the visible list contribute,
  # and a device split into virtual devices counts once per virtual device.
  config.device_count["CPU"] = 0
  config.device_count["GPU"] = 0
  for dev in self._physical_devices:
    if dev not in self._visible_device_list:
      continue

    virtual_devices = self._virtual_device_map.get(dev)
    if virtual_devices is None:
      config.device_count[dev.device_type] += 1
    else:
      config.device_count[dev.device_type] += len(virtual_devices)

  # Configure gpu_options
  gpu_options = self._compute_gpu_options()
  config.gpu_options.MergeFrom(gpu_options)

  # Configure collective ops
  if self._collective_leader:
    config.experimental.collective_group_leader = self._collective_leader
  if self._collective_scoped_allocator_enabled_ops:
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    # Replace (not extend) any pre-existing enable_op list from the base
    # config before appending the context's own ops.
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    for op in self._collective_scoped_allocator_enabled_ops:
      rewrite_options.scoped_allocator_opts.enable_op.append(op)
  if self._collective_use_nccl_communication:
    config.experimental.collective_nccl = True
  if self._collective_device_filters:
    # Likewise, the context's device filters fully replace any inherited ones.
    del config.device_filters[:]
    for f in self._collective_device_filters:
      config.device_filters.append(f)

  # Configure coordination service
  if self._coordination_service_config:
    config.experimental.coordination_config.CopyFrom(
        self._coordination_service_config)

  return config
"def",
"config",
"(",
"self",
")",
":",
"# Ensure physical devices have been discovered and config has been imported",
"self",
".",
"_initialize_physical_devices",
"(",
")",
"config",
"=",
"config_pb2",
".",
"ConfigProto",
"(",
")",
"if",
"self",
".",
"_config",
"is",
"not",
"None",
":",
"config",
".",
"CopyFrom",
"(",
"self",
".",
"_config",
")",
"if",
"self",
".",
"_optimizer_jit",
"is",
"not",
"None",
":",
"config",
".",
"graph_options",
".",
"optimizer_options",
".",
"global_jit_level",
"=",
"(",
"config_pb2",
".",
"OptimizerOptions",
".",
"ON_1",
"if",
"self",
".",
"_optimizer_jit",
"else",
"config_pb2",
".",
"OptimizerOptions",
".",
"OFF",
")",
"if",
"self",
".",
"_intra_op_parallelism_threads",
"is",
"not",
"None",
":",
"config",
".",
"intra_op_parallelism_threads",
"=",
"self",
".",
"_intra_op_parallelism_threads",
"if",
"self",
".",
"_inter_op_parallelism_threads",
"is",
"not",
"None",
":",
"config",
".",
"inter_op_parallelism_threads",
"=",
"self",
".",
"_inter_op_parallelism_threads",
"if",
"self",
".",
"_soft_device_placement",
"is",
"not",
"None",
":",
"config",
".",
"allow_soft_placement",
"=",
"self",
".",
"_soft_device_placement",
"else",
":",
"config",
".",
"allow_soft_placement",
"=",
"self",
".",
"executing_eagerly",
"(",
")",
"if",
"self",
".",
"_log_device_placement",
"is",
"not",
"None",
":",
"config",
".",
"log_device_placement",
"=",
"self",
".",
"_log_device_placement",
"if",
"self",
".",
"_operation_timeout_in_ms",
"is",
"not",
"None",
":",
"config",
".",
"operation_timeout_in_ms",
"=",
"self",
".",
"_operation_timeout_in_ms",
"is_mlir_bridge_enabled",
"=",
"pywrap_tfe",
".",
"TF_IsMlirBridgeEnabled",
"(",
")",
"config",
".",
"experimental",
".",
"mlir_bridge_rollout",
"=",
"is_mlir_bridge_enabled",
"if",
"(",
"is_mlir_bridge_enabled",
"==",
"config_pb2",
".",
"ConfigProto",
".",
"Experimental",
".",
"MLIR_BRIDGE_ROLLOUT_ENABLED",
")",
":",
"config",
".",
"experimental",
".",
"enable_mlir_bridge",
"=",
"True",
"if",
"self",
".",
"_enable_mlir_graph_optimization",
"is",
"not",
"None",
":",
"config",
".",
"experimental",
".",
"enable_mlir_graph_optimization",
"=",
"(",
"self",
".",
"_enable_mlir_graph_optimization",
")",
"def",
"rewriter_toggle",
"(",
"option",
")",
":",
"toggle",
"=",
"self",
".",
"_optimizer_experimental_options",
".",
"get",
"(",
"option",
",",
"None",
")",
"if",
"toggle",
"is",
"None",
":",
"return",
"setattr",
"(",
"config",
".",
"graph_options",
".",
"rewrite_options",
",",
"option",
",",
"(",
"rewriter_config_pb2",
".",
"RewriterConfig",
".",
"ON",
"if",
"toggle",
"else",
"rewriter_config_pb2",
".",
"RewriterConfig",
".",
"OFF",
")",
")",
"def",
"rewriter_bool",
"(",
"option",
")",
":",
"toggle",
"=",
"self",
".",
"_optimizer_experimental_options",
".",
"get",
"(",
"option",
",",
"None",
")",
"if",
"toggle",
"is",
"None",
":",
"return",
"setattr",
"(",
"config",
".",
"graph_options",
".",
"rewrite_options",
",",
"option",
",",
"toggle",
")",
"rewriter_toggle",
"(",
"\"layout_optimizer\"",
")",
"rewriter_toggle",
"(",
"\"constant_folding\"",
")",
"rewriter_toggle",
"(",
"\"shape_optimization\"",
")",
"rewriter_toggle",
"(",
"\"remapping\"",
")",
"rewriter_toggle",
"(",
"\"arithmetic_optimization\"",
")",
"rewriter_toggle",
"(",
"\"dependency_optimization\"",
")",
"rewriter_toggle",
"(",
"\"loop_optimization\"",
")",
"rewriter_toggle",
"(",
"\"function_optimization\"",
")",
"rewriter_toggle",
"(",
"\"debug_stripper\"",
")",
"rewriter_bool",
"(",
"\"disable_model_pruning\"",
")",
"rewriter_toggle",
"(",
"\"scoped_allocator_optimization\"",
")",
"rewriter_toggle",
"(",
"\"pin_to_host_optimization\"",
")",
"rewriter_toggle",
"(",
"\"implementation_selector\"",
")",
"rewriter_toggle",
"(",
"\"auto_mixed_precision\"",
")",
"rewriter_toggle",
"(",
"\"use_plugin_optimizers\"",
")",
"rewriter_bool",
"(",
"\"disable_meta_optimizer\"",
")",
"nodes",
"=",
"self",
".",
"_optimizer_experimental_options",
".",
"get",
"(",
"\"min_graph_nodes\"",
",",
"None",
")",
"if",
"nodes",
"is",
"not",
"None",
":",
"config",
".",
"graph_options",
".",
"rewrite_options",
".",
"min_graph_nodes",
"=",
"nodes",
"# Compute device counts",
"config",
".",
"device_count",
"[",
"\"CPU\"",
"]",
"=",
"0",
"config",
".",
"device_count",
"[",
"\"GPU\"",
"]",
"=",
"0",
"for",
"dev",
"in",
"self",
".",
"_physical_devices",
":",
"if",
"dev",
"not",
"in",
"self",
".",
"_visible_device_list",
":",
"continue",
"virtual_devices",
"=",
"self",
".",
"_virtual_device_map",
".",
"get",
"(",
"dev",
")",
"if",
"virtual_devices",
"is",
"None",
":",
"config",
".",
"device_count",
"[",
"dev",
".",
"device_type",
"]",
"+=",
"1",
"else",
":",
"config",
".",
"device_count",
"[",
"dev",
".",
"device_type",
"]",
"+=",
"len",
"(",
"virtual_devices",
")",
"# Configure gpu_options",
"gpu_options",
"=",
"self",
".",
"_compute_gpu_options",
"(",
")",
"config",
".",
"gpu_options",
".",
"MergeFrom",
"(",
"gpu_options",
")",
"# Configure collective ops",
"if",
"self",
".",
"_collective_leader",
":",
"config",
".",
"experimental",
".",
"collective_group_leader",
"=",
"self",
".",
"_collective_leader",
"if",
"self",
".",
"_collective_scoped_allocator_enabled_ops",
":",
"rewrite_options",
"=",
"config",
".",
"graph_options",
".",
"rewrite_options",
"rewrite_options",
".",
"scoped_allocator_optimization",
"=",
"(",
"rewriter_config_pb2",
".",
"RewriterConfig",
".",
"ON",
")",
"del",
"rewrite_options",
".",
"scoped_allocator_opts",
".",
"enable_op",
"[",
":",
"]",
"for",
"op",
"in",
"self",
".",
"_collective_scoped_allocator_enabled_ops",
":",
"rewrite_options",
".",
"scoped_allocator_opts",
".",
"enable_op",
".",
"append",
"(",
"op",
")",
"if",
"self",
".",
"_collective_use_nccl_communication",
":",
"config",
".",
"experimental",
".",
"collective_nccl",
"=",
"True",
"if",
"self",
".",
"_collective_device_filters",
":",
"del",
"config",
".",
"device_filters",
"[",
":",
"]",
"for",
"f",
"in",
"self",
".",
"_collective_device_filters",
":",
"config",
".",
"device_filters",
".",
"append",
"(",
"f",
")",
"# Configure coordination service",
"if",
"self",
".",
"_coordination_service_config",
":",
"config",
".",
"experimental",
".",
"coordination_config",
".",
"CopyFrom",
"(",
"self",
".",
"_coordination_service_config",
")",
"return",
"config"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/eager/context.py#L1071-L1185 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/_pyio.py | python | FileIO.read | (self, size=None) | Read at most size bytes, returned as bytes.
Only makes one system call, so less data may be returned than requested
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF. | Read at most size bytes, returned as bytes. | [
"Read",
"at",
"most",
"size",
"bytes",
"returned",
"as",
"bytes",
"."
def read(self, size=None):
    """Read up to *size* bytes from the file and return them as bytes.

    Only a single system call is made, so fewer bytes than requested may
    be returned.  In non-blocking mode, None is returned when no data is
    currently available.  At EOF an empty bytes object is returned.
    """
    self._checkClosed()
    self._checkReadable()
    # A missing or negative size means "read everything".
    if size is not None and size >= 0:
        try:
            return os.read(self._fd, size)
        except BlockingIOError:
            # No data available right now in non-blocking mode.
            return None
    return self.readall()
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"self",
".",
"_checkClosed",
"(",
")",
"self",
".",
"_checkReadable",
"(",
")",
"if",
"size",
"is",
"None",
"or",
"size",
"<",
"0",
":",
"return",
"self",
".",
"readall",
"(",
")",
"try",
":",
"return",
"os",
".",
"read",
"(",
"self",
".",
"_fd",
",",
"size",
")",
"except",
"BlockingIOError",
":",
"return",
"None"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/_pyio.py#L1638-L1652 | ||
fengbingchun/NN_Test | d6305825d5273e4569ccd1eda9ffa2a9c72e18d2 | src/tiny-dnn/third_party/cpplint.py | python | IsDecltype | (clean_lines, linenum, column) | return False | Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise. | Check if the token ending on (linenum, column) is decltype(). | [
"Check",
"if",
"the",
"token",
"ending",
"on",
"(",
"linenum",
"column",
")",
"is",
"decltype",
"()",
"."
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  # Walk back from the closing parenthesis to the matching opener, then
  # check whether the text immediately before it ends with "decltype".
  text, _, start_col = ReverseCloseExpression(clean_lines, linenum, column)
  if start_col < 0:
    return False
  return Search(r'\bdecltype\s*$', text[:start_col]) is not None
"def",
"IsDecltype",
"(",
"clean_lines",
",",
"linenum",
",",
"column",
")",
":",
"(",
"text",
",",
"_",
",",
"start_col",
")",
"=",
"ReverseCloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"column",
")",
"if",
"start_col",
"<",
"0",
":",
"return",
"False",
"if",
"Search",
"(",
"r'\\bdecltype\\s*$'",
",",
"text",
"[",
"0",
":",
"start_col",
"]",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/fengbingchun/NN_Test/blob/d6305825d5273e4569ccd1eda9ffa2a9c72e18d2/src/tiny-dnn/third_party/cpplint.py#L3781-L3796 | |
yrnkrn/zapcc | c6a8aa30006d997eff0d60fd37b0e62b8aa0ea50 | bindings/python/llvm/object.py | python | Section.has_symbol | (self, symbol) | return lib.LLVMGetSectionContainsSymbol(self, symbol) | Returns whether a Symbol instance is present in this Section. | Returns whether a Symbol instance is present in this Section. | [
"Returns",
"whether",
"a",
"Symbol",
"instance",
"is",
"present",
"in",
"this",
"Section",
"."
] | def has_symbol(self, symbol):
"""Returns whether a Symbol instance is present in this Section."""
if self.expired:
raise Exception('Section instance has expired.')
assert isinstance(symbol, Symbol)
return lib.LLVMGetSectionContainsSymbol(self, symbol) | [
"def",
"has_symbol",
"(",
"self",
",",
"symbol",
")",
":",
"if",
"self",
".",
"expired",
":",
"raise",
"Exception",
"(",
"'Section instance has expired.'",
")",
"assert",
"isinstance",
"(",
"symbol",
",",
"Symbol",
")",
"return",
"lib",
".",
"LLVMGetSectionContainsSymbol",
"(",
"self",
",",
"symbol",
")"
] | https://github.com/yrnkrn/zapcc/blob/c6a8aa30006d997eff0d60fd37b0e62b8aa0ea50/bindings/python/llvm/object.py#L232-L238 | |
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/nn/functional/conv.py | python | conv3d | (x,
weight,
bias=None,
stride=1,
padding=0,
dilation=1,
groups=1,
data_format="NCDHW",
name=None) | return _conv_nd(x, weight, bias, stride, padding, padding_algorithm,
dilation, groups, data_format, channel_dim, op_type,
use_cudnn, False, name) | r"""
The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are in NCDHW or NDHWC format. Where N is batch size C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convlution3D is similar with Convlution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \ast X + b)
In the above equation:
* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Example:
- Input:
Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`
- Output:
Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
Where
.. math::
D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
Args:
x (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W], the data
type of input is float16 or float32 or float64.
weight (Tensor): The convolution kernel, a Tensor with shape [M, C/g, kD, kH, kW],
where M is the number of filters(output channels), g is the number of groups,
kD, kH, kW are the filter's depth, height and width respectively.
bias (Tensor, optional): The bias, a Tensor of shape [M, ].
stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
on both sides for each dimension. If `padding` is a string, either 'VALID' or
'SAME' which is the padding algorithm. If padding size is a tuple or list,
it could be in three forms: `[pad_depth, pad_height, pad_width]` or
`[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
and when `data_format` is `"NCDHW"`, `padding` can be in the form
`[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
when `data_format` is `"NDHWC"`, `padding` can be in the form
`[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
Default: padding = 0.
dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
Default: dilation = 1.
groups (int): The groups number of the Conv3D Layer. According to grouped
convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
the first half of the filters is only connected to the first half
of the input channels, while the second half of the filters is only
connected to the second half of the input channels. Default: groups=1
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
`[batch_size, input_channels, input_height, input_width]`.
name(str|None): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
A Tensor representing the conv3d, whose data type is
the same with input. If act is None, the tensor storing the
convolution result, and if act is not None, the tensor storing
convolution and non-linearity activation result.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')
y_var = F.conv3d(x_var, w_var)
y_np = y_var.numpy()
print(y_np.shape)
# (2, 6, 6, 6, 6) | r""" | [
"r"
def conv3d(x,
           weight,
           bias=None,
           stride=1,
           padding=0,
           dilation=1,
           groups=1,
           data_format="NCDHW",
           name=None):
    r"""
    The convolution3D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are in NCDHW or NDHWC format. Where N is batch size C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convlution3D is similar with Convlution2D
    but adds one dimension(depth). If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

            D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
            H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1

    Args:
        x (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [M, C/g, kD, kH, kW],
            where M is the number of filters(output channels), g is the number of groups,
            kD, kH, kW are the filter's depth, height and width respectively.
        bias (Tensor, optional): The bias, a Tensor of shape [M, ].
        stride (int|list|tuple): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.
        name(str|None): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name is no need to set and
           None by default.

    Returns:
        A Tensor representing the conv3d, whose data type is
        the same with input. If act is None, the tensor storing the
        convolution result, and if act is not None, the tensor storing
        convolution and non-linearity activation result.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
            w_var = paddle.randn((6, 3, 3, 3, 3), dtype='float32')

            y_var = F.conv3d(x_var, w_var)
            y_np = y_var.numpy()

            print(y_np.shape)
            # (2, 6, 6, 6, 6)
    """
    # entry check
    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): {}.".format(data_format))

    # The channel axis is last for NDHWC, second (index 1) for NCDHW.
    channel_last = (data_format == "NDHWC")
    channel_dim = -1 if channel_last else 1
    if len(x.shape) != 5:
        raise ValueError(
            "Input x should be 5D tensor, but received x with the shape of {}".
            format(x.shape))
    num_channels = x.shape[channel_dim]
    num_filters = weight.shape[0]
    # A negative channel count means the dimension is dynamic/undefined here.
    if num_channels < 0:
        raise ValueError(
            "The channel dimension of the input({}) should be defined. "
            "Received: {}.".format(x.shape, num_channels))
    if groups <= 0:
        raise ValueError(
            "The groups of conv3d should be greater than 0. Received groups: {}".
            format(groups))
    if num_channels % groups != 0:
        raise ValueError(
            "The number of input channels must be divisible by Attr(groups). "
            "Received: number of channels({}), groups({}).".format(num_channels,
                                                                   groups))
    if num_filters % groups != 0:
        raise ValueError(
            "The number of filters must be divisible by Attr(groups). "
            "Received: number of filters({}), groups({}).".format(num_filters,
                                                                  groups))

    # Use cuDNN whenever Paddle was built with CUDA and a cuDNN version is
    # detected at runtime.
    cudnn_version = get_cudnn_version()
    use_cudnn = True if (core.is_compiled_with_cuda() and
                         cudnn_version is not None) else False

    # Normalize padding/stride/dilation into the 3-element list form the
    # underlying kernel expects.
    padding, padding_algorithm = _update_padding_nd(padding, channel_last, 3)
    stride = convert_to_list(stride, 3, 'stride')
    dilation = convert_to_list(dilation, 3, 'dilation')
    op_type = "conv3d"

    return _conv_nd(x, weight, bias, stride, padding, padding_algorithm,
                    dilation, groups, data_format, channel_dim, op_type,
                    use_cudnn, False, name)
"def",
"conv3d",
"(",
"x",
",",
"weight",
",",
"bias",
"=",
"None",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"0",
",",
"dilation",
"=",
"1",
",",
"groups",
"=",
"1",
",",
"data_format",
"=",
"\"NCDHW\"",
",",
"name",
"=",
"None",
")",
":",
"# entry check",
"if",
"data_format",
"not",
"in",
"[",
"\"NCDHW\"",
",",
"\"NDHWC\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received \"",
"\"Attr(data_format): {}.\"",
".",
"format",
"(",
"data_format",
")",
")",
"channel_last",
"=",
"(",
"data_format",
"==",
"\"NDHWC\"",
")",
"channel_dim",
"=",
"-",
"1",
"if",
"channel_last",
"else",
"1",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"!=",
"5",
":",
"raise",
"ValueError",
"(",
"\"Input x should be 5D tensor, but received x with the shape of {}\"",
".",
"format",
"(",
"x",
".",
"shape",
")",
")",
"num_channels",
"=",
"x",
".",
"shape",
"[",
"channel_dim",
"]",
"num_filters",
"=",
"weight",
".",
"shape",
"[",
"0",
"]",
"if",
"num_channels",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"The channel dimension of the input({}) should be defined. \"",
"\"Received: {}.\"",
".",
"format",
"(",
"x",
".",
"shape",
",",
"num_channels",
")",
")",
"if",
"groups",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"The groups of conv3d should be greater than 0. Received groups: {}\"",
".",
"format",
"(",
"groups",
")",
")",
"if",
"num_channels",
"%",
"groups",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"The number of input channels must be divisible by Attr(groups). \"",
"\"Received: number of channels({}), groups({}).\"",
".",
"format",
"(",
"num_channels",
",",
"groups",
")",
")",
"if",
"num_filters",
"%",
"groups",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"The number of filters must be divisible by Attr(groups). \"",
"\"Received: number of filters({}), groups({}).\"",
".",
"format",
"(",
"num_filters",
",",
"groups",
")",
")",
"cudnn_version",
"=",
"get_cudnn_version",
"(",
")",
"use_cudnn",
"=",
"True",
"if",
"(",
"core",
".",
"is_compiled_with_cuda",
"(",
")",
"and",
"cudnn_version",
"is",
"not",
"None",
")",
"else",
"False",
"padding",
",",
"padding_algorithm",
"=",
"_update_padding_nd",
"(",
"padding",
",",
"channel_last",
",",
"3",
")",
"stride",
"=",
"convert_to_list",
"(",
"stride",
",",
"3",
",",
"'stride'",
")",
"dilation",
"=",
"convert_to_list",
"(",
"dilation",
",",
"3",
",",
"'dilation'",
")",
"op_type",
"=",
"\"conv3d\"",
"return",
"_conv_nd",
"(",
"x",
",",
"weight",
",",
"bias",
",",
"stride",
",",
"padding",
",",
"padding_algorithm",
",",
"dilation",
",",
"groups",
",",
"data_format",
",",
"channel_dim",
",",
"op_type",
",",
"use_cudnn",
",",
"False",
",",
"name",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/nn/functional/conv.py#L1099-L1255 | |
pmq20/node-packer | 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0 | current/tools/inspector_protocol/jinja2/environment.py | python | Template.render_async | (self, *args, **kwargs) | This works similar to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled.
Example usage::
await template.render_async(knights='that say nih; asynchronously') | This works similar to :meth:`render` but returns a coroutine
that when awaited returns the entire rendered template string. This
requires the async feature to be enabled. | [
"This",
"works",
"similar",
"to",
":",
"meth",
":",
"render",
"but",
"returns",
"a",
"coroutine",
"that",
"when",
"awaited",
"returns",
"the",
"entire",
"rendered",
"template",
"string",
".",
"This",
"requires",
"the",
"async",
"feature",
"to",
"be",
"enabled",
"."
def render_async(self, *args, **kwargs):
    """Like :meth:`render`, but returns a coroutine that yields the
    fully rendered template string when awaited.  Requires the async
    feature to be enabled.

    Example usage::

        await template.render_async(knights='that say nih; asynchronously')
    """
    # The real coroutine implementation lives in asyncsupport and replaces
    # this stub when async support is available for the running Python.
    raise NotImplementedError('This feature is not available for this '
                              'version of Python')
"def",
"render_async",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# see asyncsupport for the actual implementation",
"raise",
"NotImplementedError",
"(",
"'This feature is not available for this '",
"'version of Python'",
")"
] | https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/tools/inspector_protocol/jinja2/environment.py#L1010-L1021 | ||
raymondlu/super-animation-samples | 04234269112ff0dc32447f27a761dbbb00b8ba17 | samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py | python | Cursor.get_bitfield_width | (self) | return conf.lib.clang_getFieldDeclBitWidth(self) | Retrieve the width of a bitfield. | Retrieve the width of a bitfield. | [
"Retrieve",
"the",
"width",
"of",
"a",
"bitfield",
"."
def get_bitfield_width(self):
    """Retrieve the width, in bits, of this bitfield declaration."""
    width = conf.lib.clang_getFieldDeclBitWidth(self)
    return width
"def",
"get_bitfield_width",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getFieldDeclBitWidth",
"(",
"self",
")"
] | https://github.com/raymondlu/super-animation-samples/blob/04234269112ff0dc32447f27a761dbbb00b8ba17/samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L1489-L1493 | |
facebook/fbthrift | fb9c8562aba04c4fd9b17716eb5d970cc88a75bb | thrift/lib/py/util/remote.py | python | RemoteClient._get_client | (self, options) | Get the thrift client that will be used to make method calls | Get the thrift client that will be used to make method calls | [
"Get",
"the",
"thrift",
"client",
"that",
"will",
"be",
"used",
"to",
"make",
"method",
"calls"
] | def _get_client(self, options):
"""Get the thrift client that will be used to make method calls"""
raise TypeError("_get_client should be called on "
"a subclass of RemoteClient") | [
"def",
"_get_client",
"(",
"self",
",",
"options",
")",
":",
"raise",
"TypeError",
"(",
"\"_get_client should be called on \"",
"\"a subclass of RemoteClient\"",
")"
] | https://github.com/facebook/fbthrift/blob/fb9c8562aba04c4fd9b17716eb5d970cc88a75bb/thrift/lib/py/util/remote.py#L393-L396 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/llvmlite/binding/ffi.py | python | _lib_wrapper._name | (self) | return self._lib._name | The name of the library passed in the CDLL constructor.
For duck-typing a ctypes.CDLL | The name of the library passed in the CDLL constructor. | [
"The",
"name",
"of",
"the",
"library",
"passed",
"in",
"the",
"CDLL",
"constructor",
"."
] | def _name(self):
"""The name of the library passed in the CDLL constructor.
For duck-typing a ctypes.CDLL
"""
return self._lib._name | [
"def",
"_name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_lib",
".",
"_name"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/llvmlite/binding/ffi.py#L67-L72 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/formats/style.py | python | _is_visible | (idx_row, idx_col, lengths) | return (idx_col, idx_row) in lengths | Index -> {(idx_row, idx_col): bool}). | Index -> {(idx_row, idx_col): bool}). | [
"Index",
"-",
">",
"{",
"(",
"idx_row",
"idx_col",
")",
":",
"bool",
"}",
")",
"."
] | def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}).
"""
return (idx_col, idx_row) in lengths | [
"def",
"_is_visible",
"(",
"idx_row",
",",
"idx_col",
",",
"lengths",
")",
":",
"return",
"(",
"idx_col",
",",
"idx_row",
")",
"in",
"lengths"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/formats/style.py#L1463-L1467 | |
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/elastic/timer/api.py | python | TimerServer.register_timers | (self, timer_requests: List[TimerRequest]) | Processes the incoming timer requests and registers them with the server.
The timer request can either be a acquire-timer or release-timer request.
Timer requests with a negative expiration_time should be interpreted
as a release-timer request. | Processes the incoming timer requests and registers them with the server.
The timer request can either be a acquire-timer or release-timer request.
Timer requests with a negative expiration_time should be interpreted
as a release-timer request. | [
"Processes",
"the",
"incoming",
"timer",
"requests",
"and",
"registers",
"them",
"with",
"the",
"server",
".",
"The",
"timer",
"request",
"can",
"either",
"be",
"a",
"acquire",
"-",
"timer",
"or",
"release",
"-",
"timer",
"request",
".",
"Timer",
"requests",
"with",
"a",
"negative",
"expiration_time",
"should",
"be",
"interpreted",
"as",
"a",
"release",
"-",
"timer",
"request",
"."
] | def register_timers(self, timer_requests: List[TimerRequest]) -> None:
"""
Processes the incoming timer requests and registers them with the server.
The timer request can either be a acquire-timer or release-timer request.
Timer requests with a negative expiration_time should be interpreted
as a release-timer request.
"""
pass | [
"def",
"register_timers",
"(",
"self",
",",
"timer_requests",
":",
"List",
"[",
"TimerRequest",
"]",
")",
"->",
"None",
":",
"pass"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/elastic/timer/api.py#L128-L135 | ||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/smtplib.py | python | quoteaddr | (addr) | Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything rfc822.parseaddr can handle. | Quote a subset of the email addresses defined by RFC 821. | [
"Quote",
"a",
"subset",
"of",
"the",
"email",
"addresses",
"defined",
"by",
"RFC",
"821",
"."
] | def quoteaddr(addr):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything rfc822.parseaddr can handle.
"""
m = (None, None)
try:
m = email.utils.parseaddr(addr)[1]
except AttributeError:
pass
if m == (None, None): # Indicates parse failure or AttributeError
# something weird here.. punt -ddm
return "<%s>" % addr
elif m is None:
# the sender wants an empty return address
return "<>"
else:
return "<%s>" % m | [
"def",
"quoteaddr",
"(",
"addr",
")",
":",
"m",
"=",
"(",
"None",
",",
"None",
")",
"try",
":",
"m",
"=",
"email",
".",
"utils",
".",
"parseaddr",
"(",
"addr",
")",
"[",
"1",
"]",
"except",
"AttributeError",
":",
"pass",
"if",
"m",
"==",
"(",
"None",
",",
"None",
")",
":",
"# Indicates parse failure or AttributeError",
"# something weird here.. punt -ddm",
"return",
"\"<%s>\"",
"%",
"addr",
"elif",
"m",
"is",
"None",
":",
"# the sender wants an empty return address",
"return",
"\"<>\"",
"else",
":",
"return",
"\"<%s>\"",
"%",
"m"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/smtplib.py#L133-L150 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_controls.py | python | PickerBase.GetTextCtrlProportion | (*args, **kwargs) | return _controls_.PickerBase_GetTextCtrlProportion(*args, **kwargs) | GetTextCtrlProportion(self) -> int
Returns the proportion between the text control and the picker. | GetTextCtrlProportion(self) -> int | [
"GetTextCtrlProportion",
"(",
"self",
")",
"-",
">",
"int"
] | def GetTextCtrlProportion(*args, **kwargs):
"""
GetTextCtrlProportion(self) -> int
Returns the proportion between the text control and the picker.
"""
return _controls_.PickerBase_GetTextCtrlProportion(*args, **kwargs) | [
"def",
"GetTextCtrlProportion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"PickerBase_GetTextCtrlProportion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L6766-L6772 | |
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/data/experimental/ops/prefetching_ops.py | python | map_on_gpu | (map_func) | return _apply_fn | Maps `map_func` across the elements of this dataset.
NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs
`map_func` on GPU. It must be used after applying the
`tf.data.experimental.copy_to_device` transformation with a GPU device
argument.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`. | Maps `map_func` across the elements of this dataset. | [
"Maps",
"map_func",
"across",
"the",
"elements",
"of",
"this",
"dataset",
"."
] | def map_on_gpu(map_func):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is a highly experimental version of `tf.data.Dataset.map` that runs
`map_func` on GPU. It must be used after applying the
`tf.data.experimental.copy_to_device` transformation with a GPU device
argument.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _MapOnGpuDataset(dataset, map_func)
return _apply_fn | [
"def",
"map_on_gpu",
"(",
"map_func",
")",
":",
"def",
"_apply_fn",
"(",
"dataset",
")",
":",
"return",
"_MapOnGpuDataset",
"(",
"dataset",
",",
"map_func",
")",
"return",
"_apply_fn"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/data/experimental/ops/prefetching_ops.py#L263-L284 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py2/numpy/core/multiarray.py | python | dot | (a, b, out=None) | return (a, b, out) | dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128 | dot(a, b, out=None) | [
"dot",
"(",
"a",
"b",
"out",
"=",
"None",
")"
] | def dot(a, b, out=None):
"""
dot(a, b, out=None)
Dot product of two arrays. Specifically,
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
but using :func:`matmul` or ``a @ b`` is preferred.
- If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
"""
return (a, b, out) | [
"def",
"dot",
"(",
"a",
",",
"b",
",",
"out",
"=",
"None",
")",
":",
"return",
"(",
"a",
",",
"b",
",",
"out",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/core/multiarray.py#L701-L785 | |
CNevd/Difacto_DMLC | f16862e35062707b1cf7e37d04d9b6ae34bbfd28 | dmlc-core/scripts/lint3.py | python | get_header_guard_dmlc | (filename) | return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' | Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
include/dmlc/timer.h -> DMLC_TIMTER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_ | Get Header Guard Convention for DMLC Projects. | [
"Get",
"Header",
"Guard",
"Convention",
"for",
"DMLC",
"Projects",
"."
] | def get_header_guard_dmlc(filename):
"""Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
include/dmlc/timer.h -> DMLC_TIMTER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
"""
fileinfo = cpplint.FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
inc_list = ['include', 'api', 'wrapper']
if file_path_from_root.startswith('src') and _HELPER.project_name is not None:
file_path_from_root = re.sub('^src', _HELPER.project_name, file_path_from_root)
else:
for spath in inc_list:
prefix = spath + os.sep
if file_path_from_root.startswith(prefix):
file_path_from_root = re.sub('^' + prefix, '', file_path_from_root)
break
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' | [
"def",
"get_header_guard_dmlc",
"(",
"filename",
")",
":",
"fileinfo",
"=",
"cpplint",
".",
"FileInfo",
"(",
"filename",
")",
"file_path_from_root",
"=",
"fileinfo",
".",
"RepositoryName",
"(",
")",
"inc_list",
"=",
"[",
"'include'",
",",
"'api'",
",",
"'wrapper'",
"]",
"if",
"file_path_from_root",
".",
"startswith",
"(",
"'src'",
")",
"and",
"_HELPER",
".",
"project_name",
"is",
"not",
"None",
":",
"file_path_from_root",
"=",
"re",
".",
"sub",
"(",
"'^src'",
",",
"_HELPER",
".",
"project_name",
",",
"file_path_from_root",
")",
"else",
":",
"for",
"spath",
"in",
"inc_list",
":",
"prefix",
"=",
"spath",
"+",
"os",
".",
"sep",
"if",
"file_path_from_root",
".",
"startswith",
"(",
"prefix",
")",
":",
"file_path_from_root",
"=",
"re",
".",
"sub",
"(",
"'^'",
"+",
"prefix",
",",
"''",
",",
"file_path_from_root",
")",
"break",
"return",
"re",
".",
"sub",
"(",
"r'[-./\\s]'",
",",
"'_'",
",",
"file_path_from_root",
")",
".",
"upper",
"(",
")",
"+",
"'_'"
] | https://github.com/CNevd/Difacto_DMLC/blob/f16862e35062707b1cf7e37d04d9b6ae34bbfd28/dmlc-core/scripts/lint3.py#L103-L125 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/core/_string_helpers.py | python | english_capitalize | (s) | Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
'' | Apply English case rules to convert the first character of an ASCII
string to upper case. | [
"Apply",
"English",
"case",
"rules",
"to",
"convert",
"the",
"first",
"character",
"of",
"an",
"ASCII",
"string",
"to",
"upper",
"case",
"."
] | def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s | [
"def",
"english_capitalize",
"(",
"s",
")",
":",
"if",
"s",
":",
"return",
"english_upper",
"(",
"s",
"[",
"0",
"]",
")",
"+",
"s",
"[",
"1",
":",
"]",
"else",
":",
"return",
"s"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/core/_string_helpers.py#L72-L100 | ||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/stc.py | python | StyledTextCtrl.CmdKeyAssign | (*args, **kwargs) | return _stc.StyledTextCtrl_CmdKeyAssign(*args, **kwargs) | CmdKeyAssign(self, int key, int modifiers, int cmd)
When key+modifier combination km is pressed perform msg. | CmdKeyAssign(self, int key, int modifiers, int cmd) | [
"CmdKeyAssign",
"(",
"self",
"int",
"key",
"int",
"modifiers",
"int",
"cmd",
")"
] | def CmdKeyAssign(*args, **kwargs):
"""
CmdKeyAssign(self, int key, int modifiers, int cmd)
When key+modifier combination km is pressed perform msg.
"""
return _stc.StyledTextCtrl_CmdKeyAssign(*args, **kwargs) | [
"def",
"CmdKeyAssign",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_CmdKeyAssign",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L2779-L2785 | |
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/eager/context.py | python | Context.device_spec | (self) | return self._eager_context.device_spec | Returns the device spec for the current thread. | Returns the device spec for the current thread. | [
"Returns",
"the",
"device",
"spec",
"for",
"the",
"current",
"thread",
"."
] | def device_spec(self):
"""Returns the device spec for the current thread."""
return self._eager_context.device_spec | [
"def",
"device_spec",
"(",
"self",
")",
":",
"return",
"self",
".",
"_eager_context",
".",
"device_spec"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/eager/context.py#L229-L231 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/enum34/enum/__init__.py | python | EnumMeta._get_mixins_ | (bases) | return member_type, first_enum | Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__ | Returns the type for creating enum members, and the first inherited
enum class. | [
"Returns",
"the",
"type",
"for",
"creating",
"enum",
"members",
"and",
"the",
"first",
"inherited",
"enum",
"class",
"."
] | def _get_mixins_(bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__
"""
if not bases or Enum is None:
return object, Enum
# double check that we are not subclassing a class with existing
# enumeration members; while we're at it, see if any other data
# type has been mixed in so we can use the correct __new__
member_type = first_enum = None
for base in bases:
if (base is not Enum and
issubclass(base, Enum) and
base._member_names_):
raise TypeError("Cannot extend enumerations")
# base is now the last base in bases
if not issubclass(base, Enum):
raise TypeError("new enumerations must be created as "
"`ClassName([mixin_type,] enum_type)`")
# get correct mix-in type (either mix-in type of Enum subclass, or
# first base if last base is Enum)
if not issubclass(bases[0], Enum):
member_type = bases[0] # first data type
first_enum = bases[-1] # enum type
else:
for base in bases[0].__mro__:
# most common: (IntEnum, int, Enum, object)
# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
# <class 'int'>, <Enum 'Enum'>,
# <class 'object'>)
if issubclass(base, Enum):
if first_enum is None:
first_enum = base
else:
if member_type is None:
member_type = base
return member_type, first_enum | [
"def",
"_get_mixins_",
"(",
"bases",
")",
":",
"if",
"not",
"bases",
"or",
"Enum",
"is",
"None",
":",
"return",
"object",
",",
"Enum",
"# double check that we are not subclassing a class with existing",
"# enumeration members; while we're at it, see if any other data",
"# type has been mixed in so we can use the correct __new__",
"member_type",
"=",
"first_enum",
"=",
"None",
"for",
"base",
"in",
"bases",
":",
"if",
"(",
"base",
"is",
"not",
"Enum",
"and",
"issubclass",
"(",
"base",
",",
"Enum",
")",
"and",
"base",
".",
"_member_names_",
")",
":",
"raise",
"TypeError",
"(",
"\"Cannot extend enumerations\"",
")",
"# base is now the last base in bases",
"if",
"not",
"issubclass",
"(",
"base",
",",
"Enum",
")",
":",
"raise",
"TypeError",
"(",
"\"new enumerations must be created as \"",
"\"`ClassName([mixin_type,] enum_type)`\"",
")",
"# get correct mix-in type (either mix-in type of Enum subclass, or",
"# first base if last base is Enum)",
"if",
"not",
"issubclass",
"(",
"bases",
"[",
"0",
"]",
",",
"Enum",
")",
":",
"member_type",
"=",
"bases",
"[",
"0",
"]",
"# first data type",
"first_enum",
"=",
"bases",
"[",
"-",
"1",
"]",
"# enum type",
"else",
":",
"for",
"base",
"in",
"bases",
"[",
"0",
"]",
".",
"__mro__",
":",
"# most common: (IntEnum, int, Enum, object)",
"# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,",
"# <class 'int'>, <Enum 'Enum'>,",
"# <class 'object'>)",
"if",
"issubclass",
"(",
"base",
",",
"Enum",
")",
":",
"if",
"first_enum",
"is",
"None",
":",
"first_enum",
"=",
"base",
"else",
":",
"if",
"member_type",
"is",
"None",
":",
"member_type",
"=",
"base",
"return",
"member_type",
",",
"first_enum"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/enum34/enum/__init__.py#L499-L542 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/build/waf-1.7.13/lmbrwaflib/project_settings.py | python | get_bootstrap_assets | (self, platform=None) | return assets | :param self:
:param platform: optional, defaults to current build's platform
:return: Asset type requested for the supplied platform in bootstrap.cfg | :param self:
:param platform: optional, defaults to current build's platform
:return: Asset type requested for the supplied platform in bootstrap.cfg | [
":",
"param",
"self",
":",
":",
"param",
"platform",
":",
"optional",
"defaults",
"to",
"current",
"build",
"s",
"platform",
":",
"return",
":",
"Asset",
"type",
"requested",
"for",
"the",
"supplied",
"platform",
"in",
"bootstrap",
".",
"cfg"
] | def get_bootstrap_assets(self, platform=None):
"""
:param self:
:param platform: optional, defaults to current build's platform
:return: Asset type requested for the supplied platform in bootstrap.cfg
"""
project_folder_node = getattr(self, 'srcnode', self.path)
bootstrap_cfg = project_folder_node.make_node('bootstrap.cfg')
bootstrap_contents = bootstrap_cfg.read()
assets = 'pc'
game_platform = self.get_game_platform(platform)
try:
assets = re.search('^\s*assets\s*=\s*(\w+)', bootstrap_contents, re.MULTILINE).group(1)
assets = re.search('^\s*%s_assets\s*=\s*(\w+)' % (game_platform), bootstrap_contents, re.MULTILINE).group(1)
except:
pass
return assets | [
"def",
"get_bootstrap_assets",
"(",
"self",
",",
"platform",
"=",
"None",
")",
":",
"project_folder_node",
"=",
"getattr",
"(",
"self",
",",
"'srcnode'",
",",
"self",
".",
"path",
")",
"bootstrap_cfg",
"=",
"project_folder_node",
".",
"make_node",
"(",
"'bootstrap.cfg'",
")",
"bootstrap_contents",
"=",
"bootstrap_cfg",
".",
"read",
"(",
")",
"assets",
"=",
"'pc'",
"game_platform",
"=",
"self",
".",
"get_game_platform",
"(",
"platform",
")",
"try",
":",
"assets",
"=",
"re",
".",
"search",
"(",
"'^\\s*assets\\s*=\\s*(\\w+)'",
",",
"bootstrap_contents",
",",
"re",
".",
"MULTILINE",
")",
".",
"group",
"(",
"1",
")",
"assets",
"=",
"re",
".",
"search",
"(",
"'^\\s*%s_assets\\s*=\\s*(\\w+)'",
"%",
"(",
"game_platform",
")",
",",
"bootstrap_contents",
",",
"re",
".",
"MULTILINE",
")",
".",
"group",
"(",
"1",
")",
"except",
":",
"pass",
"return",
"assets"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/project_settings.py#L688-L707 | |
bairdzhang/smallhardface | 76fa1d87a9602d9b13d7a7fe693fc7aec91cab80 | caffe/scripts/cpp_lint.py | python | CleanseRawStrings | (raw_lines) | return lines_without_raw_strings | Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings. | Removes C++11 raw strings from lines. | [
"Removes",
"C",
"++",
"11",
"raw",
"strings",
"from",
"lines",
"."
] | def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings | [
"def",
"CleanseRawStrings",
"(",
"raw_lines",
")",
":",
"delimiter",
"=",
"None",
"lines_without_raw_strings",
"=",
"[",
"]",
"for",
"line",
"in",
"raw_lines",
":",
"if",
"delimiter",
":",
"# Inside a raw string, look for the end",
"end",
"=",
"line",
".",
"find",
"(",
"delimiter",
")",
"if",
"end",
">=",
"0",
":",
"# Found the end of the string, match leading space for this",
"# line and resume copying the original lines, and also insert",
"# a \"\" on the last line.",
"leading_space",
"=",
"Match",
"(",
"r'^(\\s*)\\S'",
",",
"line",
")",
"line",
"=",
"leading_space",
".",
"group",
"(",
"1",
")",
"+",
"'\"\"'",
"+",
"line",
"[",
"end",
"+",
"len",
"(",
"delimiter",
")",
":",
"]",
"delimiter",
"=",
"None",
"else",
":",
"# Haven't found the end yet, append a blank line.",
"line",
"=",
"''",
"else",
":",
"# Look for beginning of a raw string.",
"# See 2.14.15 [lex.string] for syntax.",
"matched",
"=",
"Match",
"(",
"r'^(.*)\\b(?:R|u8R|uR|UR|LR)\"([^\\s\\\\()]*)\\((.*)$'",
",",
"line",
")",
"if",
"matched",
":",
"delimiter",
"=",
"')'",
"+",
"matched",
".",
"group",
"(",
"2",
")",
"+",
"'\"'",
"end",
"=",
"matched",
".",
"group",
"(",
"3",
")",
".",
"find",
"(",
"delimiter",
")",
"if",
"end",
">=",
"0",
":",
"# Raw string ended on same line",
"line",
"=",
"(",
"matched",
".",
"group",
"(",
"1",
")",
"+",
"'\"\"'",
"+",
"matched",
".",
"group",
"(",
"3",
")",
"[",
"end",
"+",
"len",
"(",
"delimiter",
")",
":",
"]",
")",
"delimiter",
"=",
"None",
"else",
":",
"# Start of a multi-line raw string",
"line",
"=",
"matched",
".",
"group",
"(",
"1",
")",
"+",
"'\"\"'",
"lines_without_raw_strings",
".",
"append",
"(",
"line",
")",
"# TODO(unknown): if delimiter is not None here, we might want to",
"# emit a warning for unterminated string.",
"return",
"lines_without_raw_strings"
] | https://github.com/bairdzhang/smallhardface/blob/76fa1d87a9602d9b13d7a7fe693fc7aec91cab80/caffe/scripts/cpp_lint.py#L1066-L1124 | |
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | python/mxnet/contrib/autograd.py | python | train_section | () | return TrainingStateScope(True) | Returns a training scope context to be used in 'with' statement
and captures training code.
Example::
with autograd.train_section():
y = model(x)
compute_gradient([y])
metric.update(...)
optim.step(...) | Returns a training scope context to be used in 'with' statement
and captures training code. | [
"Returns",
"a",
"training",
"scope",
"context",
"to",
"be",
"used",
"in",
"with",
"statement",
"and",
"captures",
"training",
"code",
"."
] | def train_section():
"""Returns a training scope context to be used in 'with' statement
and captures training code.
Example::
with autograd.train_section():
y = model(x)
compute_gradient([y])
metric.update(...)
optim.step(...)
"""
return TrainingStateScope(True) | [
"def",
"train_section",
"(",
")",
":",
"return",
"TrainingStateScope",
"(",
"True",
")"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/contrib/autograd.py#L74-L85 | |
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | build/android/android_commands.py | python | AndroidCommands.StartActivity | (self, package, activity,
action='android.intent.action.VIEW', data=None,
extras=None, trace_file_name=None) | Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.android.chrome').
activity: Name of activity (e.g. '.Main' or 'com.android.chrome.Main').
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity.
trace_file_name: If used, turns on and saves the trace to this file name. | Starts |package|'s activity on the device. | [
"Starts",
"|package|",
"s",
"activity",
"on",
"the",
"device",
"."
] | def StartActivity(self, package, activity,
action='android.intent.action.VIEW', data=None,
extras=None, trace_file_name=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.android.chrome').
activity: Name of activity (e.g. '.Main' or 'com.android.chrome.Main').
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity.
trace_file_name: If used, turns on and saves the trace to this file name.
"""
cmd = 'am start -a %s -n %s/%s' % (action, package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
cmd += ' -e'
for key in extras:
cmd += ' %s %s' % (key, extras[key])
if trace_file_name:
cmd += ' -S -P ' + trace_file_name
self.RunShellCommand(cmd) | [
"def",
"StartActivity",
"(",
"self",
",",
"package",
",",
"activity",
",",
"action",
"=",
"'android.intent.action.VIEW'",
",",
"data",
"=",
"None",
",",
"extras",
"=",
"None",
",",
"trace_file_name",
"=",
"None",
")",
":",
"cmd",
"=",
"'am start -a %s -n %s/%s'",
"%",
"(",
"action",
",",
"package",
",",
"activity",
")",
"if",
"data",
":",
"cmd",
"+=",
"' -d \"%s\"'",
"%",
"data",
"if",
"extras",
":",
"cmd",
"+=",
"' -e'",
"for",
"key",
"in",
"extras",
":",
"cmd",
"+=",
"' %s %s'",
"%",
"(",
"key",
",",
"extras",
"[",
"key",
"]",
")",
"if",
"trace_file_name",
":",
"cmd",
"+=",
"' -S -P '",
"+",
"trace_file_name",
"self",
".",
"RunShellCommand",
"(",
"cmd",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/build/android/android_commands.py#L341-L362 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/psutil/_pssunos.py | python | users | () | return retlist | Return currently connected users as a list of namedtuples. | Return currently connected users as a list of namedtuples. | [
"Return",
"currently",
"connected",
"users",
"as",
"a",
"list",
"of",
"namedtuples",
"."
] | def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
localhost = (':0.0', ':0')
for item in rawlist:
user, tty, hostname, tstamp, user_process, pid = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname in localhost:
hostname = 'localhost'
nt = _common.suser(user, tty, hostname, tstamp, pid)
retlist.append(nt)
return retlist | [
"def",
"users",
"(",
")",
":",
"retlist",
"=",
"[",
"]",
"rawlist",
"=",
"cext",
".",
"users",
"(",
")",
"localhost",
"=",
"(",
"':0.0'",
",",
"':0'",
")",
"for",
"item",
"in",
"rawlist",
":",
"user",
",",
"tty",
",",
"hostname",
",",
"tstamp",
",",
"user_process",
",",
"pid",
"=",
"item",
"# note: the underlying C function includes entries about",
"# system boot, run level and others. We might want",
"# to use them in the future.",
"if",
"not",
"user_process",
":",
"continue",
"if",
"hostname",
"in",
"localhost",
":",
"hostname",
"=",
"'localhost'",
"nt",
"=",
"_common",
".",
"suser",
"(",
"user",
",",
"tty",
",",
"hostname",
",",
"tstamp",
",",
"pid",
")",
"retlist",
".",
"append",
"(",
"nt",
")",
"return",
"retlist"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/psutil/_pssunos.py#L308-L324 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/docs/__init__.py | python | generate_docs | (root_dir, session) | Generates the reference documentation for botocore
This will go through every available AWS service and output ReSTructured
text files documenting each service.
:param root_dir: The directory to write the reference files to. Each
service's reference documentation is loacated at
root_dir/reference/services/service-name.rst | Generates the reference documentation for botocore | [
"Generates",
"the",
"reference",
"documentation",
"for",
"botocore"
] | def generate_docs(root_dir, session):
"""Generates the reference documentation for botocore
This will go through every available AWS service and output ReSTructured
text files documenting each service.
:param root_dir: The directory to write the reference files to. Each
service's reference documentation is loacated at
root_dir/reference/services/service-name.rst
"""
services_doc_path = os.path.join(root_dir, 'reference', 'services')
if not os.path.exists(services_doc_path):
os.makedirs(services_doc_path)
# Generate reference docs and write them out.
for service_name in session.get_available_services():
docs = ServiceDocumenter(service_name, session).document_service()
service_doc_path = os.path.join(
services_doc_path, service_name + '.rst')
with open(service_doc_path, 'wb') as f:
f.write(docs) | [
"def",
"generate_docs",
"(",
"root_dir",
",",
"session",
")",
":",
"services_doc_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'reference'",
",",
"'services'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"services_doc_path",
")",
":",
"os",
".",
"makedirs",
"(",
"services_doc_path",
")",
"# Generate reference docs and write them out.",
"for",
"service_name",
"in",
"session",
".",
"get_available_services",
"(",
")",
":",
"docs",
"=",
"ServiceDocumenter",
"(",
"service_name",
",",
"session",
")",
".",
"document_service",
"(",
")",
"service_doc_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"services_doc_path",
",",
"service_name",
"+",
"'.rst'",
")",
"with",
"open",
"(",
"service_doc_path",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"docs",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/docs/__init__.py#L18-L38 | ||
jubatus/jubatus | 1251ce551bac980488a6313728e72b3fe0b79a9f | tools/codestyle/cpplint/cpplint.py | python | PrintCategories | () | Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter. | Prints a list of all the error-categories used by error messages. | [
"Prints",
"a",
"list",
"of",
"all",
"the",
"error",
"-",
"categories",
"used",
"by",
"error",
"messages",
"."
] | def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0) | [
"def",
"PrintCategories",
"(",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"''",
".",
"join",
"(",
"' %s\\n'",
"%",
"cat",
"for",
"cat",
"in",
"_ERROR_CATEGORIES",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | https://github.com/jubatus/jubatus/blob/1251ce551bac980488a6313728e72b3fe0b79a9f/tools/codestyle/cpplint/cpplint.py#L3313-L3319 | ||
neoml-lib/neoml | a0d370fba05269a1b2258cef126f77bbd2054a3e | NeoML/Python/neoml/Dnn/AccumulativeLookup.py | python | AccumulativeLookup.size | (self) | return self._internal.get_size() | Gets the vector length. | Gets the vector length. | [
"Gets",
"the",
"vector",
"length",
"."
] | def size(self):
"""Gets the vector length.
"""
return self._internal.get_size() | [
"def",
"size",
"(",
"self",
")",
":",
"return",
"self",
".",
"_internal",
".",
"get_size",
"(",
")"
] | https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/AccumulativeLookup.py#L68-L71 | |
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/framework/docs.py | python | Library._remove_docstring_indent | (self, docstring) | return lines | Remove indenting.
We follow Python's convention and remove the minimum indent of the lines
after the first, see:
https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
preserving relative indentation.
Args:
docstring: A docstring.
Returns:
A list of strings, one per line, with the minimum indent stripped. | Remove indenting. | [
"Remove",
"indenting",
"."
] | def _remove_docstring_indent(self, docstring):
"""Remove indenting.
We follow Python's convention and remove the minimum indent of the lines
after the first, see:
https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
preserving relative indentation.
Args:
docstring: A docstring.
Returns:
A list of strings, one per line, with the minimum indent stripped.
"""
docstring = docstring or ""
lines = docstring.strip().split("\n")
min_indent = len(docstring)
for l in lines[1:]:
l = l.rstrip()
if l:
i = 0
while i < len(l) and l[i] == " ":
i += 1
if i < min_indent: min_indent = i
for i in range(1, len(lines)):
l = lines[i].rstrip()
if len(l) >= min_indent:
l = l[min_indent:]
lines[i] = l
return lines | [
"def",
"_remove_docstring_indent",
"(",
"self",
",",
"docstring",
")",
":",
"docstring",
"=",
"docstring",
"or",
"\"\"",
"lines",
"=",
"docstring",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"min_indent",
"=",
"len",
"(",
"docstring",
")",
"for",
"l",
"in",
"lines",
"[",
"1",
":",
"]",
":",
"l",
"=",
"l",
".",
"rstrip",
"(",
")",
"if",
"l",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"l",
")",
"and",
"l",
"[",
"i",
"]",
"==",
"\" \"",
":",
"i",
"+=",
"1",
"if",
"i",
"<",
"min_indent",
":",
"min_indent",
"=",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"lines",
")",
")",
":",
"l",
"=",
"lines",
"[",
"i",
"]",
".",
"rstrip",
"(",
")",
"if",
"len",
"(",
"l",
")",
">=",
"min_indent",
":",
"l",
"=",
"l",
"[",
"min_indent",
":",
"]",
"lines",
"[",
"i",
"]",
"=",
"l",
"return",
"lines"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/docs.py#L329-L359 | |
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/eager/python/examples/gan/mnist.py | python | Discriminator.call | (self, inputs) | return x | Return two logits per image estimating input authenticity.
Users should invoke __call__ to run the network, which delegates to this
method (and not call this method directly).
Args:
inputs: A batch of images as a Tensor with shape [batch_size, 28, 28, 1]
or [batch_size, 1, 28, 28]
Returns:
A Tensor with shape [batch_size] containing logits estimating
the probability that corresponding digit is real. | Return two logits per image estimating input authenticity. | [
"Return",
"two",
"logits",
"per",
"image",
"estimating",
"input",
"authenticity",
"."
] | def call(self, inputs):
"""Return two logits per image estimating input authenticity.
Users should invoke __call__ to run the network, which delegates to this
method (and not call this method directly).
Args:
inputs: A batch of images as a Tensor with shape [batch_size, 28, 28, 1]
or [batch_size, 1, 28, 28]
Returns:
A Tensor with shape [batch_size] containing logits estimating
the probability that corresponding digit is real.
"""
x = tf.reshape(inputs, self._input_shape)
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.fc2(x)
return x | [
"def",
"call",
"(",
"self",
",",
"inputs",
")",
":",
"x",
"=",
"tf",
".",
"reshape",
"(",
"inputs",
",",
"self",
".",
"_input_shape",
")",
"x",
"=",
"self",
".",
"conv1",
"(",
"x",
")",
"x",
"=",
"self",
".",
"pool1",
"(",
"x",
")",
"x",
"=",
"self",
".",
"conv2",
"(",
"x",
")",
"x",
"=",
"self",
".",
"pool2",
"(",
"x",
")",
"x",
"=",
"self",
".",
"flatten",
"(",
"x",
")",
"x",
"=",
"self",
".",
"fc1",
"(",
"x",
")",
"x",
"=",
"self",
".",
"fc2",
"(",
"x",
")",
"return",
"x"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/eager/python/examples/gan/mnist.py#L69-L91 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/ebmlib/searcheng.py | python | SearchEngine.SearchInBuffer | (self, sbuffer) | Search in the buffer
@param sbuffer: buffer like object
@todo: implement | Search in the buffer
@param sbuffer: buffer like object
@todo: implement | [
"Search",
"in",
"the",
"buffer",
"@param",
"sbuffer",
":",
"buffer",
"like",
"object",
"@todo",
":",
"implement"
] | def SearchInBuffer(self, sbuffer):
"""Search in the buffer
@param sbuffer: buffer like object
@todo: implement
"""
raise NotImplementedError | [
"def",
"SearchInBuffer",
"(",
"self",
",",
"sbuffer",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ebmlib/searcheng.py#L263-L269 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/threading.py | python | Thread._set_tstate_lock | (self) | Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted. | Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted. | [
"Set",
"a",
"lock",
"object",
"which",
"will",
"be",
"released",
"by",
"the",
"interpreter",
"when",
"the",
"underlying",
"thread",
"state",
"(",
"see",
"pystate",
".",
"h",
")",
"gets",
"deleted",
"."
] | def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
if not self.daemon:
with _shutdown_locks_lock:
_shutdown_locks.add(self._tstate_lock) | [
"def",
"_set_tstate_lock",
"(",
"self",
")",
":",
"self",
".",
"_tstate_lock",
"=",
"_set_sentinel",
"(",
")",
"self",
".",
"_tstate_lock",
".",
"acquire",
"(",
")",
"if",
"not",
"self",
".",
"daemon",
":",
"with",
"_shutdown_locks_lock",
":",
"_shutdown_locks",
".",
"add",
"(",
"self",
".",
"_tstate_lock",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/threading.py#L899-L909 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Code/Tools/AzCodeGenerator/Scripts/az_code_gen/clang_cpp.py | python | expand_annotations | (source_dictionary) | Takes a partially extracted JSON tree generated by C++ and parses
the annotations fields, expanding them into python dictionary
trees.
@param source_dictionary - The dictionary containing the annotation
fields to expand. | Takes a partially extracted JSON tree generated by C++ and parses
the annotations fields, expanding them into python dictionary
trees. | [
"Takes",
"a",
"partially",
"extracted",
"JSON",
"tree",
"generated",
"by",
"C",
"++",
"and",
"parses",
"the",
"annotations",
"fields",
"expanding",
"them",
"into",
"python",
"dictionary",
"trees",
"."
] | def expand_annotations(source_dictionary):
"""Takes a partially extracted JSON tree generated by C++ and parses
the annotations fields, expanding them into python dictionary
trees.
@param source_dictionary - The dictionary containing the annotation
fields to expand.
"""
def expand_and_store_annotation(dest, key, tag, value):
# extract any template params if they exist
match = TEMPLATE_TAG_PATTERN.match(tag)
template_params = None
if match:
tag = match.group('tag')
template_params = match.group('template_params')
if template_params: # if there are template params, nest them
value = { 'params': value, 'template_params': re.split('[\s,]+', template_params) }
store_annotation(dest, tag, value)
annotations = {}
for annotation_key, annotation_value in source_dictionary['annotations'].items():
for attribute_name, attribute_value in annotation_value.items():
# The tree returned might be collapsible into a list
# or a string. Check and perform the appropriate adjustment.
result = build_tree_from_string(attribute_value)
if not isinstance(result, list):
if is_simple_string(result):
result = convert_key_to_string(result)
elif is_list(result):
result = convert_keys_to_list(result)
elif result is None: # tags with no arguments default to a true value
result = "true"
expand_and_store_annotation(annotations, annotation_key, attribute_name, result)
# update annotations with converted hierarchy
source_dictionary['annotations'] = annotations | [
"def",
"expand_annotations",
"(",
"source_dictionary",
")",
":",
"def",
"expand_and_store_annotation",
"(",
"dest",
",",
"key",
",",
"tag",
",",
"value",
")",
":",
"# extract any template params if they exist",
"match",
"=",
"TEMPLATE_TAG_PATTERN",
".",
"match",
"(",
"tag",
")",
"template_params",
"=",
"None",
"if",
"match",
":",
"tag",
"=",
"match",
".",
"group",
"(",
"'tag'",
")",
"template_params",
"=",
"match",
".",
"group",
"(",
"'template_params'",
")",
"if",
"template_params",
":",
"# if there are template params, nest them",
"value",
"=",
"{",
"'params'",
":",
"value",
",",
"'template_params'",
":",
"re",
".",
"split",
"(",
"'[\\s,]+'",
",",
"template_params",
")",
"}",
"store_annotation",
"(",
"dest",
",",
"tag",
",",
"value",
")",
"annotations",
"=",
"{",
"}",
"for",
"annotation_key",
",",
"annotation_value",
"in",
"source_dictionary",
"[",
"'annotations'",
"]",
".",
"items",
"(",
")",
":",
"for",
"attribute_name",
",",
"attribute_value",
"in",
"annotation_value",
".",
"items",
"(",
")",
":",
"# The tree returned might be collapsible into a list",
"# or a string. Check and perform the appropriate adjustment.",
"result",
"=",
"build_tree_from_string",
"(",
"attribute_value",
")",
"if",
"not",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"if",
"is_simple_string",
"(",
"result",
")",
":",
"result",
"=",
"convert_key_to_string",
"(",
"result",
")",
"elif",
"is_list",
"(",
"result",
")",
":",
"result",
"=",
"convert_keys_to_list",
"(",
"result",
")",
"elif",
"result",
"is",
"None",
":",
"# tags with no arguments default to a true value",
"result",
"=",
"\"true\"",
"expand_and_store_annotation",
"(",
"annotations",
",",
"annotation_key",
",",
"attribute_name",
",",
"result",
")",
"# update annotations with converted hierarchy",
"source_dictionary",
"[",
"'annotations'",
"]",
"=",
"annotations"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Code/Tools/AzCodeGenerator/Scripts/az_code_gen/clang_cpp.py#L200-L233 | ||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Source/ThirdParty/CEF3/cef_source/tools/cef_parser.py | python | obj_class.get_analysis | (self, value, named = True) | return obj_analysis([self, self.parent], value, named) | Return an analysis of the value based on the class definition
context. | Return an analysis of the value based on the class definition
context. | [
"Return",
"an",
"analysis",
"of",
"the",
"value",
"based",
"on",
"the",
"class",
"definition",
"context",
"."
] | def get_analysis(self, value, named = True):
""" Return an analysis of the value based on the class definition
context.
"""
return obj_analysis([self, self.parent], value, named) | [
"def",
"get_analysis",
"(",
"self",
",",
"value",
",",
"named",
"=",
"True",
")",
":",
"return",
"obj_analysis",
"(",
"[",
"self",
",",
"self",
".",
"parent",
"]",
",",
"value",
",",
"named",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/cef_source/tools/cef_parser.py#L977-L981 | |
luliyucoordinate/Leetcode | 96afcdc54807d1d184e881a075d1dbf3371e31fb | src/0071-Simplify-Path/0071.py | python | Solution.simplifyPath | (self, path) | return '/'+'/'.join(stack) | :type path: str
:rtype: str | :type path: str
:rtype: str | [
":",
"type",
"path",
":",
"str",
":",
"rtype",
":",
"str"
] | def simplifyPath(self, path):
"""
:type path: str
:rtype: str
"""
stack = list()
path = [p for p in path.split('/') if p]
for f in path:
if f == '.':
continue
elif f == '..':
if stack:
stack.pop()
else:
stack.append(f)
return '/'+'/'.join(stack) | [
"def",
"simplifyPath",
"(",
"self",
",",
"path",
")",
":",
"stack",
"=",
"list",
"(",
")",
"path",
"=",
"[",
"p",
"for",
"p",
"in",
"path",
".",
"split",
"(",
"'/'",
")",
"if",
"p",
"]",
"for",
"f",
"in",
"path",
":",
"if",
"f",
"==",
"'.'",
":",
"continue",
"elif",
"f",
"==",
"'..'",
":",
"if",
"stack",
":",
"stack",
".",
"pop",
"(",
")",
"else",
":",
"stack",
".",
"append",
"(",
"f",
")",
"return",
"'/'",
"+",
"'/'",
".",
"join",
"(",
"stack",
")"
] | https://github.com/luliyucoordinate/Leetcode/blob/96afcdc54807d1d184e881a075d1dbf3371e31fb/src/0071-Simplify-Path/0071.py#L2-L18 | |
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/yaml/scanner.py | python | Scanner.__init__ | (self) | Initialize the scanner. | Initialize the scanner. | [
"Initialize",
"the",
"scanner",
"."
] | def __init__(self):
"""Initialize the scanner."""
# It is assumed that Scanner and Reader will have a common descendant.
# Reader do the dirty work of checking for BOM and converting the
# input data to Unicode. It also adds NUL to the end.
#
# Reader supports the following methods
# self.peek(i=0) # peek the next i-th character
# self.prefix(l=1) # peek the next l characters
# self.forward(l=1) # read the next l characters and move the pointer.
# Had we reached the end of the stream?
self.done = False
# The number of unclosed '{' and '['. `flow_level == 0` means block
# context.
self.flow_level = 0
# List of processed tokens that are not yet emitted.
self.tokens = []
# Add the STREAM-START token.
self.fetch_stream_start()
# Number of tokens that were emitted through the `get_token` method.
self.tokens_taken = 0
# The current indentation level.
self.indent = -1
# Past indentation levels.
self.indents = []
# Variables related to simple keys treatment.
# A simple key is a key that is not denoted by the '?' indicator.
# Example of simple keys:
# ---
# block simple key: value
# ? not a simple key:
# : { flow simple key: value }
# We emit the KEY token before all keys, so when we find a potential
# simple key, we try to locate the corresponding ':' indicator.
# Simple keys should be limited to a single line and 1024 characters.
# Can a simple key start at the current position? A simple key may
# start:
# - at the beginning of the line, not counting indentation spaces
# (in block context),
# - after '{', '[', ',' (in the flow context),
# - after '?', ':', '-' (in the block context).
# In the block context, this flag also signifies if a block collection
# may start at the current position.
self.allow_simple_key = True
# Keep track of possible simple keys. This is a dictionary. The key
# is `flow_level`; there can be no more that one possible simple key
# for each level. The value is a SimpleKey record:
# (token_number, required, index, line, column, mark)
# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
# '[', or '{' tokens.
self.possible_simple_keys = {} | [
"def",
"__init__",
"(",
"self",
")",
":",
"# It is assumed that Scanner and Reader will have a common descendant.",
"# Reader do the dirty work of checking for BOM and converting the",
"# input data to Unicode. It also adds NUL to the end.",
"#",
"# Reader supports the following methods",
"# self.peek(i=0) # peek the next i-th character",
"# self.prefix(l=1) # peek the next l characters",
"# self.forward(l=1) # read the next l characters and move the pointer.",
"# Had we reached the end of the stream?",
"self",
".",
"done",
"=",
"False",
"# The number of unclosed '{' and '['. `flow_level == 0` means block",
"# context.",
"self",
".",
"flow_level",
"=",
"0",
"# List of processed tokens that are not yet emitted.",
"self",
".",
"tokens",
"=",
"[",
"]",
"# Add the STREAM-START token.",
"self",
".",
"fetch_stream_start",
"(",
")",
"# Number of tokens that were emitted through the `get_token` method.",
"self",
".",
"tokens_taken",
"=",
"0",
"# The current indentation level.",
"self",
".",
"indent",
"=",
"-",
"1",
"# Past indentation levels.",
"self",
".",
"indents",
"=",
"[",
"]",
"# Variables related to simple keys treatment.",
"# A simple key is a key that is not denoted by the '?' indicator.",
"# Example of simple keys:",
"# ---",
"# block simple key: value",
"# ? not a simple key:",
"# : { flow simple key: value }",
"# We emit the KEY token before all keys, so when we find a potential",
"# simple key, we try to locate the corresponding ':' indicator.",
"# Simple keys should be limited to a single line and 1024 characters.",
"# Can a simple key start at the current position? A simple key may",
"# start:",
"# - at the beginning of the line, not counting indentation spaces",
"# (in block context),",
"# - after '{', '[', ',' (in the flow context),",
"# - after '?', ':', '-' (in the block context).",
"# In the block context, this flag also signifies if a block collection",
"# may start at the current position.",
"self",
".",
"allow_simple_key",
"=",
"True",
"# Keep track of possible simple keys. This is a dictionary. The key",
"# is `flow_level`; there can be no more that one possible simple key",
"# for each level. The value is a SimpleKey record:",
"# (token_number, required, index, line, column, mark)",
"# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),",
"# '[', or '{' tokens.",
"self",
".",
"possible_simple_keys",
"=",
"{",
"}"
] | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/yaml/scanner.py#L48-L109 | ||
raymondlu/super-animation-samples | 04234269112ff0dc32447f27a761dbbb00b8ba17 | samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py | python | Diagnostic.option | (self) | return conf.lib.clang_getDiagnosticOption(self, None) | The command-line option that enables this diagnostic. | The command-line option that enables this diagnostic. | [
"The",
"command",
"-",
"line",
"option",
"that",
"enables",
"this",
"diagnostic",
"."
] | def option(self):
"""The command-line option that enables this diagnostic."""
return conf.lib.clang_getDiagnosticOption(self, None) | [
"def",
"option",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getDiagnosticOption",
"(",
"self",
",",
"None",
")"
] | https://github.com/raymondlu/super-animation-samples/blob/04234269112ff0dc32447f27a761dbbb00b8ba17/samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py#L350-L352 | |
generalized-intelligence/GAAS | 29ab17d3e8a4ba18edef3a57c36d8db6329fac73 | algorithms/src/LocalizationAndMapping/icp_lidar_localization/fast_gicp/thirdparty/Sophus/py/sophus/so2.py | python | So2.matrix | (self) | return sympy.Matrix([
[self.z.real, -self.z.imag],
[self.z.imag, self.z.real]]) | returns matrix representation | returns matrix representation | [
"returns",
"matrix",
"representation"
] | def matrix(self):
""" returns matrix representation """
return sympy.Matrix([
[self.z.real, -self.z.imag],
[self.z.imag, self.z.real]]) | [
"def",
"matrix",
"(",
"self",
")",
":",
"return",
"sympy",
".",
"Matrix",
"(",
"[",
"[",
"self",
".",
"z",
".",
"real",
",",
"-",
"self",
".",
"z",
".",
"imag",
"]",
",",
"[",
"self",
".",
"z",
".",
"imag",
",",
"self",
".",
"z",
".",
"real",
"]",
"]",
")"
] | https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/algorithms/src/LocalizationAndMapping/icp_lidar_localization/fast_gicp/thirdparty/Sophus/py/sophus/so2.py#L35-L39 | |
openvinotoolkit/openvino | dedcbeafa8b84cccdc55ca64b8da516682b381c7 | samples/python/speech_sample/utils.py | python | set_scale_factors | (plugin_config: dict, scale_factors: list) | Set a scale factor provided for each input | Set a scale factor provided for each input | [
"Set",
"a",
"scale",
"factor",
"provided",
"for",
"each",
"input"
] | def set_scale_factors(plugin_config: dict, scale_factors: list):
"""Set a scale factor provided for each input"""
for i, scale_factor in enumerate(scale_factors):
log.info(f'For input {i} using scale factor of {scale_factor:.7f}')
plugin_config[f'GNA_SCALE_FACTOR_{i}'] = str(scale_factor) | [
"def",
"set_scale_factors",
"(",
"plugin_config",
":",
"dict",
",",
"scale_factors",
":",
"list",
")",
":",
"for",
"i",
",",
"scale_factor",
"in",
"enumerate",
"(",
"scale_factors",
")",
":",
"log",
".",
"info",
"(",
"f'For input {i} using scale factor of {scale_factor:.7f}'",
")",
"plugin_config",
"[",
"f'GNA_SCALE_FACTOR_{i}'",
"]",
"=",
"str",
"(",
"scale_factor",
")"
] | https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/samples/python/speech_sample/utils.py#L47-L51 | ||
mapnik/mapnik | f3da900c355e1d15059c4a91b00203dcc9d9f0ef | scons/scons-local-4.1.0/SCons/Tool/mslink.py | python | _dllEmitter | (target, source, env, paramtp) | return (target+extratargets, source+extrasources) | Common implementation of dll emitter. | Common implementation of dll emitter. | [
"Common",
"implementation",
"of",
"dll",
"emitter",
"."
] | def _dllEmitter(target, source, env, paramtp):
"""Common implementation of dll emitter."""
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
insert_def = env.subst("$WINDOWS_INSERT_DEF")
if insert_def not in ['', '0', 0] and \
not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
# append a def file to the list of sources
extrasources.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and \
(env.get('WINDOWS_INSERT_MANIFEST', 0) or env.get('WINDOWS_EMBED_MANIFEST', 0)):
# MSVC 8 and above automatically generate .manifest files that must be installed
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if version_num >= 11.0 and env.get('PCH', 0):
# MSVC 11 and above need the PCH object file to be added to the link line,
# otherwise you get link error LNK2011.
pchobj = SCons.Util.splitext(str(env['PCH']))[0] + '.obj'
# print "prog_emitter, version %s, appending pchobj %s"%(version_num, pchobj)
if pchobj not in extrasources:
extrasources.append(pchobj)
if not no_import_lib and \
not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
# Append an import library to the list of targets.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"LIBPREFIX", "LIBSUFFIX"))
# and .exp file is created if there are exports from a DLL
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
return (target+extratargets, source+extrasources) | [
"def",
"_dllEmitter",
"(",
"target",
",",
"source",
",",
"env",
",",
"paramtp",
")",
":",
"SCons",
".",
"Tool",
".",
"msvc",
".",
"validate_vars",
"(",
"env",
")",
"extratargets",
"=",
"[",
"]",
"extrasources",
"=",
"[",
"]",
"dll",
"=",
"env",
".",
"FindIxes",
"(",
"target",
",",
"'%sPREFIX'",
"%",
"paramtp",
",",
"'%sSUFFIX'",
"%",
"paramtp",
")",
"no_import_lib",
"=",
"env",
".",
"get",
"(",
"'no_import_lib'",
",",
"0",
")",
"if",
"not",
"dll",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"'A shared library should have exactly one target with the suffix: %s'",
"%",
"env",
".",
"subst",
"(",
"'$%sSUFFIX'",
"%",
"paramtp",
")",
")",
"insert_def",
"=",
"env",
".",
"subst",
"(",
"\"$WINDOWS_INSERT_DEF\"",
")",
"if",
"insert_def",
"not",
"in",
"[",
"''",
",",
"'0'",
",",
"0",
"]",
"and",
"not",
"env",
".",
"FindIxes",
"(",
"source",
",",
"\"WINDOWSDEFPREFIX\"",
",",
"\"WINDOWSDEFSUFFIX\"",
")",
":",
"# append a def file to the list of sources",
"extrasources",
".",
"append",
"(",
"env",
".",
"ReplaceIxes",
"(",
"dll",
",",
"'%sPREFIX'",
"%",
"paramtp",
",",
"'%sSUFFIX'",
"%",
"paramtp",
",",
"\"WINDOWSDEFPREFIX\"",
",",
"\"WINDOWSDEFSUFFIX\"",
")",
")",
"version_num",
",",
"suite",
"=",
"SCons",
".",
"Tool",
".",
"msvs",
".",
"msvs_parse_version",
"(",
"env",
".",
"get",
"(",
"'MSVS_VERSION'",
",",
"'6.0'",
")",
")",
"if",
"version_num",
">=",
"8.0",
"and",
"(",
"env",
".",
"get",
"(",
"'WINDOWS_INSERT_MANIFEST'",
",",
"0",
")",
"or",
"env",
".",
"get",
"(",
"'WINDOWS_EMBED_MANIFEST'",
",",
"0",
")",
")",
":",
"# MSVC 8 and above automatically generate .manifest files that must be installed",
"extratargets",
".",
"append",
"(",
"env",
".",
"ReplaceIxes",
"(",
"dll",
",",
"'%sPREFIX'",
"%",
"paramtp",
",",
"'%sSUFFIX'",
"%",
"paramtp",
",",
"\"WINDOWSSHLIBMANIFESTPREFIX\"",
",",
"\"WINDOWSSHLIBMANIFESTSUFFIX\"",
")",
")",
"if",
"'PDB'",
"in",
"env",
"and",
"env",
"[",
"'PDB'",
"]",
":",
"pdb",
"=",
"env",
".",
"arg2nodes",
"(",
"'$PDB'",
",",
"target",
"=",
"target",
",",
"source",
"=",
"source",
")",
"[",
"0",
"]",
"extratargets",
".",
"append",
"(",
"pdb",
")",
"target",
"[",
"0",
"]",
".",
"attributes",
".",
"pdb",
"=",
"pdb",
"if",
"version_num",
">=",
"11.0",
"and",
"env",
".",
"get",
"(",
"'PCH'",
",",
"0",
")",
":",
"# MSVC 11 and above need the PCH object file to be added to the link line,",
"# otherwise you get link error LNK2011.",
"pchobj",
"=",
"SCons",
".",
"Util",
".",
"splitext",
"(",
"str",
"(",
"env",
"[",
"'PCH'",
"]",
")",
")",
"[",
"0",
"]",
"+",
"'.obj'",
"# print \"prog_emitter, version %s, appending pchobj %s\"%(version_num, pchobj)",
"if",
"pchobj",
"not",
"in",
"extrasources",
":",
"extrasources",
".",
"append",
"(",
"pchobj",
")",
"if",
"not",
"no_import_lib",
"and",
"not",
"env",
".",
"FindIxes",
"(",
"target",
",",
"\"LIBPREFIX\"",
",",
"\"LIBSUFFIX\"",
")",
":",
"# Append an import library to the list of targets.",
"extratargets",
".",
"append",
"(",
"env",
".",
"ReplaceIxes",
"(",
"dll",
",",
"'%sPREFIX'",
"%",
"paramtp",
",",
"'%sSUFFIX'",
"%",
"paramtp",
",",
"\"LIBPREFIX\"",
",",
"\"LIBSUFFIX\"",
")",
")",
"# and .exp file is created if there are exports from a DLL",
"extratargets",
".",
"append",
"(",
"env",
".",
"ReplaceIxes",
"(",
"dll",
",",
"'%sPREFIX'",
"%",
"paramtp",
",",
"'%sSUFFIX'",
"%",
"paramtp",
",",
"\"WINDOWSEXPPREFIX\"",
",",
"\"WINDOWSEXPSUFFIX\"",
")",
")",
"return",
"(",
"target",
"+",
"extratargets",
",",
"source",
"+",
"extrasources",
")"
] | https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Tool/mslink.py#L92-L150 | |
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/python/turicreate/util/_cloudpickle/_cloudpickle.py | python | cell_set | (cell, value) | Set the value of a closure cell.
The point of this function is to set the cell_contents attribute of a cell
after its creation. This operation is necessary in case the cell contains a
reference to the function the cell belongs to, as when calling the
function's constructor
``f = types.FunctionType(code, globals, name, argdefs, closure)``,
closure will not be able to contain the yet-to-be-created f.
In Python3.7, cell_contents is writeable, so setting the contents of a cell
can be done simply using
>>> cell.cell_contents = value
In earlier Python3 versions, the cell_contents attribute of a cell is read
only, but this limitation can be worked around by leveraging the Python 3
``nonlocal`` keyword.
In Python2 however, this attribute is read only, and there is no
``nonlocal`` keyword. For this reason, we need to come up with more
complicated hacks to set this attribute.
The chosen approach is to create a function with a STORE_DEREF opcode,
which sets the content of a closure variable. Typically:
>>> def inner(value):
... lambda: cell # the lambda makes cell a closure
... cell = value # cell is a closure, so this triggers a STORE_DEREF
(Note that in Python2, A STORE_DEREF can never be triggered from an inner
function. The function g for example here
>>> def f(var):
... def g():
... var += 1
... return g
will not modify the closure variable ``var```inplace, but instead try to
load a local variable var and increment it. As g does not assign the local
variable ``var`` any initial value, calling f(1)() will fail at runtime.)
Our objective is to set the value of a given cell ``cell``. So we need to
somewhat reference our ``cell`` object into the ``inner`` function so that
this object (and not the smoke cell of the lambda function) gets affected
by the STORE_DEREF operation.
In inner, ``cell`` is referenced as a cell variable (an enclosing variable
that is referenced by the inner function). If we create a new function
cell_set with the exact same code as ``inner``, but with ``cell`` marked as
a free variable instead, the STORE_DEREF will be applied on its closure -
``cell``, which we can specify explicitly during construction! The new
cell_set variable thus actually sets the contents of a specified cell!
Note: we do not make use of the ``nonlocal`` keyword to set the contents of
a cell in early python3 versions to limit possible syntax errors in case
test and checker libraries decide to parse the whole file. | Set the value of a closure cell. | [
"Set",
"the",
"value",
"of",
"a",
"closure",
"cell",
"."
] | def cell_set(cell, value):
"""Set the value of a closure cell.
The point of this function is to set the cell_contents attribute of a cell
after its creation. This operation is necessary in case the cell contains a
reference to the function the cell belongs to, as when calling the
function's constructor
``f = types.FunctionType(code, globals, name, argdefs, closure)``,
closure will not be able to contain the yet-to-be-created f.
In Python3.7, cell_contents is writeable, so setting the contents of a cell
can be done simply using
>>> cell.cell_contents = value
In earlier Python3 versions, the cell_contents attribute of a cell is read
only, but this limitation can be worked around by leveraging the Python 3
``nonlocal`` keyword.
In Python2 however, this attribute is read only, and there is no
``nonlocal`` keyword. For this reason, we need to come up with more
complicated hacks to set this attribute.
The chosen approach is to create a function with a STORE_DEREF opcode,
which sets the content of a closure variable. Typically:
>>> def inner(value):
... lambda: cell # the lambda makes cell a closure
... cell = value # cell is a closure, so this triggers a STORE_DEREF
(Note that in Python2, A STORE_DEREF can never be triggered from an inner
function. The function g for example here
>>> def f(var):
... def g():
... var += 1
... return g
will not modify the closure variable ``var```inplace, but instead try to
load a local variable var and increment it. As g does not assign the local
variable ``var`` any initial value, calling f(1)() will fail at runtime.)
Our objective is to set the value of a given cell ``cell``. So we need to
somewhat reference our ``cell`` object into the ``inner`` function so that
this object (and not the smoke cell of the lambda function) gets affected
by the STORE_DEREF operation.
In inner, ``cell`` is referenced as a cell variable (an enclosing variable
that is referenced by the inner function). If we create a new function
cell_set with the exact same code as ``inner``, but with ``cell`` marked as
a free variable instead, the STORE_DEREF will be applied on its closure -
``cell``, which we can specify explicitly during construction! The new
cell_set variable thus actually sets the contents of a specified cell!
Note: we do not make use of the ``nonlocal`` keyword to set the contents of
a cell in early python3 versions to limit possible syntax errors in case
test and checker libraries decide to parse the whole file.
"""
if sys.version_info[:2] >= (3, 7): # pragma: no branch
cell.cell_contents = value
else:
_cell_set = types.FunctionType(
_cell_set_template_code, {}, '_cell_set', (), (cell,),)
_cell_set(value) | [
"def",
"cell_set",
"(",
"cell",
",",
"value",
")",
":",
"if",
"sys",
".",
"version_info",
"[",
":",
"2",
"]",
">=",
"(",
"3",
",",
"7",
")",
":",
"# pragma: no branch",
"cell",
".",
"cell_contents",
"=",
"value",
"else",
":",
"_cell_set",
"=",
"types",
".",
"FunctionType",
"(",
"_cell_set_template_code",
",",
"{",
"}",
",",
"'_cell_set'",
",",
"(",
")",
",",
"(",
"cell",
",",
")",
",",
")",
"_cell_set",
"(",
"value",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/util/_cloudpickle/_cloudpickle.py#L308-L370 | ||
xiaolonw/caffe-video_triplet | c39ea1ad6e937ccf7deba4510b7e555165abf05f | scripts/cpp_lint.py | python | ProcessLine | (filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]) | Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error | Processes a single line in the file. | [
"Processes",
"a",
"single",
"line",
"in",
"the",
"file",
"."
] | def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckCaffeAlternatives(filename, clean_lines, line, error)
CheckCaffeDataLayerSetUp(filename, clean_lines, line, error)
CheckCaffeRandom(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error) | [
"def",
"ProcessLine",
"(",
"filename",
",",
"file_extension",
",",
"clean_lines",
",",
"line",
",",
"include_state",
",",
"function_state",
",",
"nesting_state",
",",
"error",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"raw_lines",
"=",
"clean_lines",
".",
"raw_lines",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"line",
"]",
",",
"line",
",",
"error",
")",
"nesting_state",
".",
"Update",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"if",
"nesting_state",
".",
"stack",
"and",
"nesting_state",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"inline_asm",
"!=",
"_NO_ASM",
":",
"return",
"CheckForFunctionLengths",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"function_state",
",",
"error",
")",
"CheckForMultilineCommentsAndStrings",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckStyle",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"file_extension",
",",
"nesting_state",
",",
"error",
")",
"CheckLanguage",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"file_extension",
",",
"include_state",
",",
"nesting_state",
",",
"error",
")",
"CheckForNonConstReference",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"nesting_state",
",",
"error",
")",
"CheckForNonStandardConstructs",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"nesting_state",
",",
"error",
")",
"CheckVlogArguments",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckCaffeAlternatives",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckCaffeDataLayerSetUp",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckCaffeRandom",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckPosixThreading",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckInvalidIncrement",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckMakePairUsesDeduction",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"for",
"check_fn",
"in",
"extra_check_functions",
":",
"check_fn",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")"
] | https://github.com/xiaolonw/caffe-video_triplet/blob/c39ea1ad6e937ccf7deba4510b7e555165abf05f/scripts/cpp_lint.py#L4600-L4642 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/prompt-toolkit/py2/prompt_toolkit/eventloop/posix.py | python | PosixEventLoop.add_reader | (self, fd, callback) | Add read file descriptor to the event loop. | Add read file descriptor to the event loop. | [
"Add",
"read",
"file",
"descriptor",
"to",
"the",
"event",
"loop",
"."
] | def add_reader(self, fd, callback):
" Add read file descriptor to the event loop. "
fd = fd_to_int(fd)
self._read_fds[fd] = callback
self.selector.register(fd) | [
"def",
"add_reader",
"(",
"self",
",",
"fd",
",",
"callback",
")",
":",
"fd",
"=",
"fd_to_int",
"(",
"fd",
")",
"self",
".",
"_read_fds",
"[",
"fd",
"]",
"=",
"callback",
"self",
".",
"selector",
".",
"register",
"(",
"fd",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/prompt-toolkit/py2/prompt_toolkit/eventloop/posix.py#L271-L275 | ||
llvm-mirror/lldb | d01083a850f577b85501a0902b52fd0930de72c7 | utils/vim-lldb/python-vim-lldb/vim_ui.py | python | UI.__init__ | (self) | Declare UI state variables | Declare UI state variables | [
"Declare",
"UI",
"state",
"variables"
] | def __init__(self):
""" Declare UI state variables """
# Default panes to display
self.defaultPanes = [
'breakpoints',
'backtrace',
'locals',
'threads',
'registers',
'disassembly']
# map of tuples (filename, line) --> SBBreakpoint
self.markedBreakpoints = {}
# Currently shown signs
self.breakpointSigns = {}
self.pcSigns = []
# Container for panes
self.paneCol = PaneLayout()
# All possible LLDB panes
self.backtracePane = BacktracePane(self.paneCol)
self.threadPane = ThreadPane(self.paneCol)
self.disassemblyPane = DisassemblyPane(self.paneCol)
self.localsPane = LocalsPane(self.paneCol)
self.registersPane = RegistersPane(self.paneCol)
self.breakPane = BreakpointsPane(self.paneCol) | [
"def",
"__init__",
"(",
"self",
")",
":",
"# Default panes to display",
"self",
".",
"defaultPanes",
"=",
"[",
"'breakpoints'",
",",
"'backtrace'",
",",
"'locals'",
",",
"'threads'",
",",
"'registers'",
",",
"'disassembly'",
"]",
"# map of tuples (filename, line) --> SBBreakpoint",
"self",
".",
"markedBreakpoints",
"=",
"{",
"}",
"# Currently shown signs",
"self",
".",
"breakpointSigns",
"=",
"{",
"}",
"self",
".",
"pcSigns",
"=",
"[",
"]",
"# Container for panes",
"self",
".",
"paneCol",
"=",
"PaneLayout",
"(",
")",
"# All possible LLDB panes",
"self",
".",
"backtracePane",
"=",
"BacktracePane",
"(",
"self",
".",
"paneCol",
")",
"self",
".",
"threadPane",
"=",
"ThreadPane",
"(",
"self",
".",
"paneCol",
")",
"self",
".",
"disassemblyPane",
"=",
"DisassemblyPane",
"(",
"self",
".",
"paneCol",
")",
"self",
".",
"localsPane",
"=",
"LocalsPane",
"(",
"self",
".",
"paneCol",
")",
"self",
".",
"registersPane",
"=",
"RegistersPane",
"(",
"self",
".",
"paneCol",
")",
"self",
".",
"breakPane",
"=",
"BreakpointsPane",
"(",
"self",
".",
"paneCol",
")"
] | https://github.com/llvm-mirror/lldb/blob/d01083a850f577b85501a0902b52fd0930de72c7/utils/vim-lldb/python-vim-lldb/vim_ui.py#L24-L52 | ||
google/or-tools | 2cb85b4eead4c38e1c54b48044f92087cf165bce | ortools/constraint_solver/doc/routing_svg.py | python | SVGPrinter.draw_routes | (self) | Draws the routes. | Draws the routes. | [
"Draws",
"the",
"routes",
"."
] | def draw_routes(self):
"""Draws the routes."""
print(r'<!-- Print routes -->')
for route_idx, route in enumerate(self.routes()):
print(r'<!-- Print route {idx} -->'.format(idx=route_idx))
color = self._color_palette.value(route_idx)
colorname = self._color_palette.name(route_idx)
self.draw_route(route, color, colorname) | [
"def",
"draw_routes",
"(",
"self",
")",
":",
"print",
"(",
"r'<!-- Print routes -->'",
")",
"for",
"route_idx",
",",
"route",
"in",
"enumerate",
"(",
"self",
".",
"routes",
"(",
")",
")",
":",
"print",
"(",
"r'<!-- Print route {idx} -->'",
".",
"format",
"(",
"idx",
"=",
"route_idx",
")",
")",
"color",
"=",
"self",
".",
"_color_palette",
".",
"value",
"(",
"route_idx",
")",
"colorname",
"=",
"self",
".",
"_color_palette",
".",
"name",
"(",
"route_idx",
")",
"self",
".",
"draw_route",
"(",
"route",
",",
"color",
",",
"colorname",
")"
] | https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/ortools/constraint_solver/doc/routing_svg.py#L586-L593 | ||
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/external/bazel_tools/third_party/py/gflags/__init__.py | python | DECLARE_key_flag | (flag_name, flag_values=FLAGS) | Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--help).
Sample usage:
gflags.DECLARED_key_flag('flag_1')
Args:
flag_name: A string, the name of an already declared flag.
(Redeclaring flags as key, including flags implicitly key
because they were declared in this module, is a no-op.)
flag_values: A FlagValues object. This should almost never
need to be overridden. | Declares one flag as key to the current module. | [
"Declares",
"one",
"flag",
"as",
"key",
"to",
"the",
"current",
"module",
"."
] | def DECLARE_key_flag(flag_name, flag_values=FLAGS):
"""Declares one flag as key to the current module.
Key flags are flags that are deemed really important for a module.
They are important when listing help messages; e.g., if the
--helpshort command-line flag is used, then only the key flags of the
main module are listed (instead of all flags, as in the case of
--help).
Sample usage:
gflags.DECLARED_key_flag('flag_1')
Args:
flag_name: A string, the name of an already declared flag.
(Redeclaring flags as key, including flags implicitly key
because they were declared in this module, is a no-op.)
flag_values: A FlagValues object. This should almost never
need to be overridden.
"""
if flag_name in _SPECIAL_FLAGS:
# Take care of the special flags, e.g., --flagfile, --undefok.
# These flags are defined in _SPECIAL_FLAGS, and are treated
# specially during flag parsing, taking precedence over the
# user-defined flags.
_InternalDeclareKeyFlags([flag_name],
flag_values=_SPECIAL_FLAGS,
key_flag_values=flag_values)
return
_InternalDeclareKeyFlags([flag_name], flag_values=flag_values) | [
"def",
"DECLARE_key_flag",
"(",
"flag_name",
",",
"flag_values",
"=",
"FLAGS",
")",
":",
"if",
"flag_name",
"in",
"_SPECIAL_FLAGS",
":",
"# Take care of the special flags, e.g., --flagfile, --undefok.",
"# These flags are defined in _SPECIAL_FLAGS, and are treated",
"# specially during flag parsing, taking precedence over the",
"# user-defined flags.",
"_InternalDeclareKeyFlags",
"(",
"[",
"flag_name",
"]",
",",
"flag_values",
"=",
"_SPECIAL_FLAGS",
",",
"key_flag_values",
"=",
"flag_values",
")",
"return",
"_InternalDeclareKeyFlags",
"(",
"[",
"flag_name",
"]",
",",
"flag_values",
"=",
"flag_values",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/external/bazel_tools/third_party/py/gflags/__init__.py#L2238-L2267 | ||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/distutils/command/register.py | python | register.check_metadata | (self) | Deprecated API. | Deprecated API. | [
"Deprecated",
"API",
"."
] | def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.register.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.strict = self.strict
check.restructuredtext = 1
check.run() | [
"def",
"check_metadata",
"(",
"self",
")",
":",
"warn",
"(",
"\"distutils.command.register.check_metadata is deprecated, \\\n use the check command instead\"",
",",
"PendingDeprecationWarning",
")",
"check",
"=",
"self",
".",
"distribution",
".",
"get_command_obj",
"(",
"'check'",
")",
"check",
".",
"ensure_finalized",
"(",
")",
"check",
".",
"strict",
"=",
"self",
".",
"strict",
"check",
".",
"restructuredtext",
"=",
"1",
"check",
".",
"run",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/distutils/command/register.py#L58-L66 | ||
hakuna-m/wubiuefi | caec1af0a09c78fd5a345180ada1fe45e0c63493 | src/openpgp/sap/util/strnum.py | python | int2str | (n) | return binascii.unhexlify(h) | Convert an integer to a string.
:Parameters:
- `n`: integer to convert to string
:Returns: string
This is a simple transformation using the builtin hex() function
to return the number.
**Note:** I'm not sure what the relationship between hex()
representations and endian issues are, these need to be tested.
Example:
>>> strnums.int2str(34728919023)
'\\x08\\x16\\x01?\\xef' | Convert an integer to a string. | [
"Convert",
"an",
"integer",
"to",
"a",
"string",
"."
] | def int2str(n):
"""Convert an integer to a string.
:Parameters:
- `n`: integer to convert to string
:Returns: string
This is a simple transformation using the builtin hex() function
to return the number.
**Note:** I'm not sure what the relationship between hex()
representations and endian issues are, these need to be tested.
Example:
>>> strnums.int2str(34728919023)
'\\x08\\x16\\x01?\\xef'
"""
h = hex(n)[2:] # chop off the '0x'
if h[-1] in ['l', 'L']:
h = h[:-1]
if 1 == len(h) % 2: # odd string, add '0' to beginning
h = ''.join(['0', h])
return binascii.unhexlify(h) | [
"def",
"int2str",
"(",
"n",
")",
":",
"h",
"=",
"hex",
"(",
"n",
")",
"[",
"2",
":",
"]",
"# chop off the '0x' ",
"if",
"h",
"[",
"-",
"1",
"]",
"in",
"[",
"'l'",
",",
"'L'",
"]",
":",
"h",
"=",
"h",
"[",
":",
"-",
"1",
"]",
"if",
"1",
"==",
"len",
"(",
"h",
")",
"%",
"2",
":",
"# odd string, add '0' to beginning",
"h",
"=",
"''",
".",
"join",
"(",
"[",
"'0'",
",",
"h",
"]",
")",
"return",
"binascii",
".",
"unhexlify",
"(",
"h",
")"
] | https://github.com/hakuna-m/wubiuefi/blob/caec1af0a09c78fd5a345180ada1fe45e0c63493/src/openpgp/sap/util/strnum.py#L79-L103 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_misc.py | python | DateTime_GetEnglishWeekDayName | (*args, **kwargs) | return _misc_.DateTime_GetEnglishWeekDayName(*args, **kwargs) | DateTime_GetEnglishWeekDayName(int weekday, int flags=Name_Full) -> String | DateTime_GetEnglishWeekDayName(int weekday, int flags=Name_Full) -> String | [
"DateTime_GetEnglishWeekDayName",
"(",
"int",
"weekday",
"int",
"flags",
"=",
"Name_Full",
")",
"-",
">",
"String"
] | def DateTime_GetEnglishWeekDayName(*args, **kwargs):
"""DateTime_GetEnglishWeekDayName(int weekday, int flags=Name_Full) -> String"""
return _misc_.DateTime_GetEnglishWeekDayName(*args, **kwargs) | [
"def",
"DateTime_GetEnglishWeekDayName",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime_GetEnglishWeekDayName",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L4277-L4279 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/idlelib/IdleHistory.py | python | History.history_next | (self, event) | return "break" | Fetch later statement; start with ealiest if cyclic. | Fetch later statement; start with ealiest if cyclic. | [
"Fetch",
"later",
"statement",
";",
"start",
"with",
"ealiest",
"if",
"cyclic",
"."
] | def history_next(self, event):
"Fetch later statement; start with ealiest if cyclic."
self.fetch(reverse=False)
return "break" | [
"def",
"history_next",
"(",
"self",
",",
"event",
")",
":",
"self",
".",
"fetch",
"(",
"reverse",
"=",
"False",
")",
"return",
"\"break\""
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/idlelib/IdleHistory.py#L30-L33 | |
bsdnoobz/opencv-code | d3bd05d9f29d7c602d560d59f627760f654a83c7 | opencv-qt-integration-2/python/ImageApp.py | python | ImageApp.do_canny | (self) | Perform Gaussian blurring on original image and display the result. | Perform Gaussian blurring on original image and display the result. | [
"Perform",
"Gaussian",
"blurring",
"on",
"original",
"image",
"and",
"display",
"the",
"result",
"."
] | def do_canny(self):
"""Perform Gaussian blurring on original image and display the result."""
img = cv2.cvtColor(self.original_img, cv2.COLOR_RGB2GRAY)
img = cv2.Canny(img, 150, 150)
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
self.show_image(img) | [
"def",
"do_canny",
"(",
"self",
")",
":",
"img",
"=",
"cv2",
".",
"cvtColor",
"(",
"self",
".",
"original_img",
",",
"cv2",
".",
"COLOR_RGB2GRAY",
")",
"img",
"=",
"cv2",
".",
"Canny",
"(",
"img",
",",
"150",
",",
"150",
")",
"img",
"=",
"cv2",
".",
"cvtColor",
"(",
"img",
",",
"cv2",
".",
"COLOR_GRAY2RGB",
")",
"self",
".",
"show_image",
"(",
"img",
")"
] | https://github.com/bsdnoobz/opencv-code/blob/d3bd05d9f29d7c602d560d59f627760f654a83c7/opencv-qt-integration-2/python/ImageApp.py#L63-L68 | ||
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/distributed/fleet/utils/internal_storage.py | python | GradStorage._array_grads | (self) | Given the parameters gradients which have been registered previously, rebuild the whole InternalStorage. | Given the parameters gradients which have been registered previously, rebuild the whole InternalStorage. | [
"Given",
"the",
"parameters",
"gradients",
"which",
"have",
"been",
"registered",
"previously",
"rebuild",
"the",
"whole",
"InternalStorage",
"."
] | def _array_grads(self):
"""
Given the parameters gradients which have been registered previously, rebuild the whole InternalStorage.
"""
if len(self._params) > 0:
self._fill = 0
for p in self._params:
self._add_grad_as_view(p, self._parm2align[p.name]) | [
"def",
"_array_grads",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"_params",
")",
">",
"0",
":",
"self",
".",
"_fill",
"=",
"0",
"for",
"p",
"in",
"self",
".",
"_params",
":",
"self",
".",
"_add_grad_as_view",
"(",
"p",
",",
"self",
".",
"_parm2align",
"[",
"p",
".",
"name",
"]",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distributed/fleet/utils/internal_storage.py#L291-L298 | ||
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/SConf.py | python | CheckContext.Result | (self, res) | Inform about the result of the test. If res is not a string, displays
'yes' or 'no' depending on whether res is evaluated as true or false.
The result is only displayed when self.did_show_result is not set. | Inform about the result of the test. If res is not a string, displays
'yes' or 'no' depending on whether res is evaluated as true or false.
The result is only displayed when self.did_show_result is not set. | [
"Inform",
"about",
"the",
"result",
"of",
"the",
"test",
".",
"If",
"res",
"is",
"not",
"a",
"string",
"displays",
"yes",
"or",
"no",
"depending",
"on",
"whether",
"res",
"is",
"evaluated",
"as",
"true",
"or",
"false",
".",
"The",
"result",
"is",
"only",
"displayed",
"when",
"self",
".",
"did_show_result",
"is",
"not",
"set",
"."
] | def Result(self, res):
"""Inform about the result of the test. If res is not a string, displays
'yes' or 'no' depending on whether res is evaluated as true or false.
The result is only displayed when self.did_show_result is not set.
"""
if isinstance(res, str):
text = res
elif res:
text = "yes"
else:
text = "no"
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1 | [
"def",
"Result",
"(",
"self",
",",
"res",
")",
":",
"if",
"isinstance",
"(",
"res",
",",
"str",
")",
":",
"text",
"=",
"res",
"elif",
"res",
":",
"text",
"=",
"\"yes\"",
"else",
":",
"text",
"=",
"\"no\"",
"if",
"self",
".",
"did_show_result",
"==",
"0",
":",
"# Didn't show result yet, do it now.",
"self",
".",
"Display",
"(",
"text",
"+",
"\"\\n\"",
")",
"self",
".",
"did_show_result",
"=",
"1"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/SConf.py#L795-L810 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/importlib/resources.py | python | read_text | (package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict') | Return the decoded string of the resource.
The decoding-related arguments have the same semantics as those of
bytes.decode(). | Return the decoded string of the resource. | [
"Return",
"the",
"decoded",
"string",
"of",
"the",
"resource",
"."
] | def read_text(package: Package,
resource: Resource,
encoding: str = 'utf-8',
errors: str = 'strict') -> str:
"""Return the decoded string of the resource.
The decoding-related arguments have the same semantics as those of
bytes.decode().
"""
with open_text(package, resource, encoding, errors) as fp:
return fp.read() | [
"def",
"read_text",
"(",
"package",
":",
"Package",
",",
"resource",
":",
"Resource",
",",
"encoding",
":",
"str",
"=",
"'utf-8'",
",",
"errors",
":",
"str",
"=",
"'strict'",
")",
"->",
"str",
":",
"with",
"open_text",
"(",
"package",
",",
"resource",
",",
"encoding",
",",
"errors",
")",
"as",
"fp",
":",
"return",
"fp",
".",
"read",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/importlib/resources.py#L130-L140 | ||
GeometryCollective/boundary-first-flattening | 8250e5a0e85980ec50b5e8aa8f49dd6519f915cd | deps/nanogui/ext/pybind11/tools/clang/cindex.py | python | TypeKind.spelling | (self) | return conf.lib.clang_getTypeKindSpelling(self.value) | Retrieve the spelling of this TypeKind. | Retrieve the spelling of this TypeKind. | [
"Retrieve",
"the",
"spelling",
"of",
"this",
"TypeKind",
"."
] | def spelling(self):
"""Retrieve the spelling of this TypeKind."""
return conf.lib.clang_getTypeKindSpelling(self.value) | [
"def",
"spelling",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getTypeKindSpelling",
"(",
"self",
".",
"value",
")"
] | https://github.com/GeometryCollective/boundary-first-flattening/blob/8250e5a0e85980ec50b5e8aa8f49dd6519f915cd/deps/nanogui/ext/pybind11/tools/clang/cindex.py#L1697-L1699 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/stc.py | python | StyledTextCtrl.MarginSetStyles | (*args, **kwargs) | return _stc.StyledTextCtrl_MarginSetStyles(*args, **kwargs) | MarginSetStyles(self, int line, String styles)
Set the style in the text margin for a line | MarginSetStyles(self, int line, String styles) | [
"MarginSetStyles",
"(",
"self",
"int",
"line",
"String",
"styles",
")"
] | def MarginSetStyles(*args, **kwargs):
"""
MarginSetStyles(self, int line, String styles)
Set the style in the text margin for a line
"""
return _stc.StyledTextCtrl_MarginSetStyles(*args, **kwargs) | [
"def",
"MarginSetStyles",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_MarginSetStyles",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L5871-L5877 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py3/scipy/stats/mstats_extras.py | python | hdquantiles | (data, prob=list([.25,.5,.75]), axis=None, var=False,) | return ma.fix_invalid(result, copy=False) | Computes quantile estimates with the Harrell-Davis method.
The quantile estimates are calculated as a weighted linear combination
of order statistics.
Parameters
----------
data : array_like
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
var : bool, optional
Whether to return the variance of the estimate.
Returns
-------
hdquantiles : MaskedArray
A (p,) array of quantiles (if `var` is False), or a (2,p) array of
quantiles and variances (if `var` is True), where ``p`` is the
number of quantiles.
See Also
--------
hdquantiles_sd | Computes quantile estimates with the Harrell-Davis method. | [
"Computes",
"quantile",
"estimates",
"with",
"the",
"Harrell",
"-",
"Davis",
"method",
"."
] | def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
"""
Computes quantile estimates with the Harrell-Davis method.
The quantile estimates are calculated as a weighted linear combination
of order statistics.
Parameters
----------
data : array_like
Data array.
prob : sequence, optional
Sequence of quantiles to compute.
axis : int or None, optional
Axis along which to compute the quantiles. If None, use a flattened
array.
var : bool, optional
Whether to return the variance of the estimate.
Returns
-------
hdquantiles : MaskedArray
A (p,) array of quantiles (if `var` is False), or a (2,p) array of
quantiles and variances (if `var` is True), where ``p`` is the
number of quantiles.
See Also
--------
hdquantiles_sd
"""
def _hd_1D(data,prob,var):
"Computes the HD quantiles for a 1D array. Returns nan for invalid data."
xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
# Don't use length here, in case we have a numpy scalar
n = xsorted.size
hd = np.empty((2,len(prob)), float_)
if n < 2:
hd.flat = np.nan
if var:
return hd
return hd[0]
v = np.arange(n+1) / float(n)
betacdf = beta.cdf
for (i,p) in enumerate(prob):
_w = betacdf(v, (n+1)*p, (n+1)*(1-p))
w = _w[1:] - _w[:-1]
hd_mean = np.dot(w, xsorted)
hd[0,i] = hd_mean
#
hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
#
hd[0, prob == 0] = xsorted[0]
hd[0, prob == 1] = xsorted[-1]
if var:
hd[1, prob == 0] = hd[1, prob == 1] = np.nan
return hd
return hd[0]
# Initialization & checks
data = ma.array(data, copy=False, dtype=float_)
p = np.array(prob, copy=False, ndmin=1)
# Computes quantiles along axis (or globally)
if (axis is None) or (data.ndim == 1):
result = _hd_1D(data, p, var)
else:
if data.ndim > 2:
raise ValueError("Array 'data' must be at most two dimensional, "
"but got data.ndim = %d" % data.ndim)
result = ma.apply_along_axis(_hd_1D, axis, data, p, var)
return ma.fix_invalid(result, copy=False) | [
"def",
"hdquantiles",
"(",
"data",
",",
"prob",
"=",
"list",
"(",
"[",
".25",
",",
".5",
",",
".75",
"]",
")",
",",
"axis",
"=",
"None",
",",
"var",
"=",
"False",
",",
")",
":",
"def",
"_hd_1D",
"(",
"data",
",",
"prob",
",",
"var",
")",
":",
"\"Computes the HD quantiles for a 1D array. Returns nan for invalid data.\"",
"xsorted",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"sort",
"(",
"data",
".",
"compressed",
"(",
")",
".",
"view",
"(",
"ndarray",
")",
")",
")",
"# Don't use length here, in case we have a numpy scalar",
"n",
"=",
"xsorted",
".",
"size",
"hd",
"=",
"np",
".",
"empty",
"(",
"(",
"2",
",",
"len",
"(",
"prob",
")",
")",
",",
"float_",
")",
"if",
"n",
"<",
"2",
":",
"hd",
".",
"flat",
"=",
"np",
".",
"nan",
"if",
"var",
":",
"return",
"hd",
"return",
"hd",
"[",
"0",
"]",
"v",
"=",
"np",
".",
"arange",
"(",
"n",
"+",
"1",
")",
"/",
"float",
"(",
"n",
")",
"betacdf",
"=",
"beta",
".",
"cdf",
"for",
"(",
"i",
",",
"p",
")",
"in",
"enumerate",
"(",
"prob",
")",
":",
"_w",
"=",
"betacdf",
"(",
"v",
",",
"(",
"n",
"+",
"1",
")",
"*",
"p",
",",
"(",
"n",
"+",
"1",
")",
"*",
"(",
"1",
"-",
"p",
")",
")",
"w",
"=",
"_w",
"[",
"1",
":",
"]",
"-",
"_w",
"[",
":",
"-",
"1",
"]",
"hd_mean",
"=",
"np",
".",
"dot",
"(",
"w",
",",
"xsorted",
")",
"hd",
"[",
"0",
",",
"i",
"]",
"=",
"hd_mean",
"#",
"hd",
"[",
"1",
",",
"i",
"]",
"=",
"np",
".",
"dot",
"(",
"w",
",",
"(",
"xsorted",
"-",
"hd_mean",
")",
"**",
"2",
")",
"#",
"hd",
"[",
"0",
",",
"prob",
"==",
"0",
"]",
"=",
"xsorted",
"[",
"0",
"]",
"hd",
"[",
"0",
",",
"prob",
"==",
"1",
"]",
"=",
"xsorted",
"[",
"-",
"1",
"]",
"if",
"var",
":",
"hd",
"[",
"1",
",",
"prob",
"==",
"0",
"]",
"=",
"hd",
"[",
"1",
",",
"prob",
"==",
"1",
"]",
"=",
"np",
".",
"nan",
"return",
"hd",
"return",
"hd",
"[",
"0",
"]",
"# Initialization & checks",
"data",
"=",
"ma",
".",
"array",
"(",
"data",
",",
"copy",
"=",
"False",
",",
"dtype",
"=",
"float_",
")",
"p",
"=",
"np",
".",
"array",
"(",
"prob",
",",
"copy",
"=",
"False",
",",
"ndmin",
"=",
"1",
")",
"# Computes quantiles along axis (or globally)",
"if",
"(",
"axis",
"is",
"None",
")",
"or",
"(",
"data",
".",
"ndim",
"==",
"1",
")",
":",
"result",
"=",
"_hd_1D",
"(",
"data",
",",
"p",
",",
"var",
")",
"else",
":",
"if",
"data",
".",
"ndim",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Array 'data' must be at most two dimensional, \"",
"\"but got data.ndim = %d\"",
"%",
"data",
".",
"ndim",
")",
"result",
"=",
"ma",
".",
"apply_along_axis",
"(",
"_hd_1D",
",",
"axis",
",",
"data",
",",
"p",
",",
"var",
")",
"return",
"ma",
".",
"fix_invalid",
"(",
"result",
",",
"copy",
"=",
"False",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/stats/mstats_extras.py#L31-L103 | |
google/tink | 59bb34495d1cb8f9d9dbc0f0a52c4f9e21491a14 | python/tink/jwt/_jwt_hmac_key_manager.py | python | _JwtHmac.verify_mac_and_decode_with_kid | (
self, compact: str, validator: _jwt_validator.JwtValidator,
kid: Optional[str]) | return _verified_jwt.VerifiedJwt._create(raw_jwt) | Verifies, validates and decodes a MACed compact JWT token. | Verifies, validates and decodes a MACed compact JWT token. | [
"Verifies",
"validates",
"and",
"decodes",
"a",
"MACed",
"compact",
"JWT",
"token",
"."
] | def verify_mac_and_decode_with_kid(
self, compact: str, validator: _jwt_validator.JwtValidator,
kid: Optional[str]) -> _verified_jwt.VerifiedJwt:
"""Verifies, validates and decodes a MACed compact JWT token."""
parts = _jwt_format.split_signed_compact(compact)
unsigned_compact, json_header, json_payload, mac = parts
self._verify_mac(mac, unsigned_compact)
header = _json_util.json_loads(json_header)
_jwt_format.validate_header(
header=header,
algorithm=self._algorithm,
tink_kid=kid,
custom_kid=self._custom_kid)
raw_jwt = _raw_jwt.raw_jwt_from_json(
_jwt_format.get_type_header(header), json_payload)
_jwt_validator.validate(validator, raw_jwt)
return _verified_jwt.VerifiedJwt._create(raw_jwt) | [
"def",
"verify_mac_and_decode_with_kid",
"(",
"self",
",",
"compact",
":",
"str",
",",
"validator",
":",
"_jwt_validator",
".",
"JwtValidator",
",",
"kid",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"_verified_jwt",
".",
"VerifiedJwt",
":",
"parts",
"=",
"_jwt_format",
".",
"split_signed_compact",
"(",
"compact",
")",
"unsigned_compact",
",",
"json_header",
",",
"json_payload",
",",
"mac",
"=",
"parts",
"self",
".",
"_verify_mac",
"(",
"mac",
",",
"unsigned_compact",
")",
"header",
"=",
"_json_util",
".",
"json_loads",
"(",
"json_header",
")",
"_jwt_format",
".",
"validate_header",
"(",
"header",
"=",
"header",
",",
"algorithm",
"=",
"self",
".",
"_algorithm",
",",
"tink_kid",
"=",
"kid",
",",
"custom_kid",
"=",
"self",
".",
"_custom_kid",
")",
"raw_jwt",
"=",
"_raw_jwt",
".",
"raw_jwt_from_json",
"(",
"_jwt_format",
".",
"get_type_header",
"(",
"header",
")",
",",
"json_payload",
")",
"_jwt_validator",
".",
"validate",
"(",
"validator",
",",
"raw_jwt",
")",
"return",
"_verified_jwt",
".",
"VerifiedJwt",
".",
"_create",
"(",
"raw_jwt",
")"
] | https://github.com/google/tink/blob/59bb34495d1cb8f9d9dbc0f0a52c4f9e21491a14/python/tink/jwt/_jwt_hmac_key_manager.py#L80-L96 | |
hfinkel/llvm-project-cxxjit | 91084ef018240bbb8e24235ff5cd8c355a9c1a1e | clang/bindings/python/clang/cindex.py | python | Cursor.get_bitfield_width | (self) | return conf.lib.clang_getFieldDeclBitWidth(self) | Retrieve the width of a bitfield. | Retrieve the width of a bitfield. | [
"Retrieve",
"the",
"width",
"of",
"a",
"bitfield",
"."
] | def get_bitfield_width(self):
"""
Retrieve the width of a bitfield.
"""
return conf.lib.clang_getFieldDeclBitWidth(self) | [
"def",
"get_bitfield_width",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getFieldDeclBitWidth",
"(",
"self",
")"
] | https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/bindings/python/clang/cindex.py#L1878-L1882 | |
NVIDIA/thrust | 627dccb359a635afdd69e95a6cc59698f23f70e2 | internal/benchmark/compare_benchmark_results.py | python | record_aggregator.__iter__ | (self) | return self | Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s).
This is a requirement for the `Iterable` protocol. | Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s). | [
"Return",
"an",
"iterator",
"to",
"the",
"output",
"sequence",
"of",
"separated",
"distinguishing",
"variables",
"and",
"dependent",
"variables",
"(",
"a",
"tuple",
"of",
"two",
"dict",
"s",
")",
"."
] | def __iter__(self):
"""Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s).
This is a requirement for the `Iterable` protocol.
"""
return self | [
"def",
"__iter__",
"(",
"self",
")",
":",
"return",
"self"
] | https://github.com/NVIDIA/thrust/blob/627dccb359a635afdd69e95a6cc59698f23f70e2/internal/benchmark/compare_benchmark_results.py#L1018-L1024 | |
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | third_party/Python/module/ptyprocess-0.6.0/ptyprocess/ptyprocess.py | python | PtyProcess.isatty | (self) | return os.isatty(self.fd) | This returns True if the file descriptor is open and connected to a
tty(-like) device, else False.
On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
the child pty may not appear as a terminal device. This means
methods such as setecho(), setwinsize(), getwinsize() may raise an
IOError. | This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. | [
"This",
"returns",
"True",
"if",
"the",
"file",
"descriptor",
"is",
"open",
"and",
"connected",
"to",
"a",
"tty",
"(",
"-",
"like",
")",
"device",
"else",
"False",
"."
] | def isatty(self):
'''This returns True if the file descriptor is open and connected to a
tty(-like) device, else False.
On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
the child pty may not appear as a terminal device. This means
methods such as setecho(), setwinsize(), getwinsize() may raise an
IOError. '''
return os.isatty(self.fd) | [
"def",
"isatty",
"(",
"self",
")",
":",
"return",
"os",
".",
"isatty",
"(",
"self",
".",
"fd",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/third_party/Python/module/ptyprocess-0.6.0/ptyprocess/ptyprocess.py#L411-L420 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_windows.py | python | VScrolledWindow.EstimateTotalHeight | (*args, **kwargs) | return _windows_.VScrolledWindow_EstimateTotalHeight(*args, **kwargs) | EstimateTotalHeight(self) -> int | EstimateTotalHeight(self) -> int | [
"EstimateTotalHeight",
"(",
"self",
")",
"-",
">",
"int"
] | def EstimateTotalHeight(*args, **kwargs):
"""EstimateTotalHeight(self) -> int"""
return _windows_.VScrolledWindow_EstimateTotalHeight(*args, **kwargs) | [
"def",
"EstimateTotalHeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"VScrolledWindow_EstimateTotalHeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L2442-L2444 | |
BitMEX/api-connectors | 37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812 | auto-generated/python/swagger_client/models/user_preferences.py | python | UserPreferences.hide_name_from_leaderboard | (self, hide_name_from_leaderboard) | Sets the hide_name_from_leaderboard of this UserPreferences.
:param hide_name_from_leaderboard: The hide_name_from_leaderboard of this UserPreferences. # noqa: E501
:type: bool | Sets the hide_name_from_leaderboard of this UserPreferences. | [
"Sets",
"the",
"hide_name_from_leaderboard",
"of",
"this",
"UserPreferences",
"."
] | def hide_name_from_leaderboard(self, hide_name_from_leaderboard):
"""Sets the hide_name_from_leaderboard of this UserPreferences.
:param hide_name_from_leaderboard: The hide_name_from_leaderboard of this UserPreferences. # noqa: E501
:type: bool
"""
self._hide_name_from_leaderboard = hide_name_from_leaderboard | [
"def",
"hide_name_from_leaderboard",
"(",
"self",
",",
"hide_name_from_leaderboard",
")",
":",
"self",
".",
"_hide_name_from_leaderboard",
"=",
"hide_name_from_leaderboard"
] | https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/user_preferences.py#L443-L451 | ||
numenta/nupic.core | 949950cf2c6d8d894c7eabfa2860aae679bf91f7 | bindings/py/setup.py | python | fixPath | (path) | return path | Ensures paths are correct for linux and windows | Ensures paths are correct for linux and windows | [
"Ensures",
"paths",
"are",
"correct",
"for",
"linux",
"and",
"windows"
] | def fixPath(path):
"""
Ensures paths are correct for linux and windows
"""
path = os.path.abspath(os.path.expanduser(path))
if path.startswith("\\"):
return "C:" + path
return path | [
"def",
"fixPath",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
")",
"if",
"path",
".",
"startswith",
"(",
"\"\\\\\"",
")",
":",
"return",
"\"C:\"",
"+",
"path",
"return",
"path"
] | https://github.com/numenta/nupic.core/blob/949950cf2c6d8d894c7eabfa2860aae679bf91f7/bindings/py/setup.py#L79-L87 | |
p4lang/PI | 38d87e81253feff9fff0660d662c885be78fb719 | tools/cpplint.py | python | _IncludeState.CheckNextIncludeOrder | (self, header_type) | return '' | Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong. | Returns a non-empty error message if the next header is out of order. | [
"Returns",
"a",
"non",
"-",
"empty",
"error",
"message",
"if",
"the",
"next",
"header",
"is",
"out",
"of",
"order",
"."
] | def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _OTHER_SYS_HEADER:
if self._section <= self._OTHER_SYS_SECTION:
self._section = self._OTHER_SYS_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return '' | [
"def",
"CheckNextIncludeOrder",
"(",
"self",
",",
"header_type",
")",
":",
"error_message",
"=",
"(",
"'Found %s after %s'",
"%",
"(",
"self",
".",
"_TYPE_NAMES",
"[",
"header_type",
"]",
",",
"self",
".",
"_SECTION_NAMES",
"[",
"self",
".",
"_section",
"]",
")",
")",
"last_section",
"=",
"self",
".",
"_section",
"if",
"header_type",
"==",
"_C_SYS_HEADER",
":",
"if",
"self",
".",
"_section",
"<=",
"self",
".",
"_C_SECTION",
":",
"self",
".",
"_section",
"=",
"self",
".",
"_C_SECTION",
"else",
":",
"self",
".",
"_last_header",
"=",
"''",
"return",
"error_message",
"elif",
"header_type",
"==",
"_CPP_SYS_HEADER",
":",
"if",
"self",
".",
"_section",
"<=",
"self",
".",
"_CPP_SECTION",
":",
"self",
".",
"_section",
"=",
"self",
".",
"_CPP_SECTION",
"else",
":",
"self",
".",
"_last_header",
"=",
"''",
"return",
"error_message",
"elif",
"header_type",
"==",
"_OTHER_SYS_HEADER",
":",
"if",
"self",
".",
"_section",
"<=",
"self",
".",
"_OTHER_SYS_SECTION",
":",
"self",
".",
"_section",
"=",
"self",
".",
"_OTHER_SYS_SECTION",
"else",
":",
"self",
".",
"_last_header",
"=",
"''",
"return",
"error_message",
"elif",
"header_type",
"==",
"_LIKELY_MY_HEADER",
":",
"if",
"self",
".",
"_section",
"<=",
"self",
".",
"_MY_H_SECTION",
":",
"self",
".",
"_section",
"=",
"self",
".",
"_MY_H_SECTION",
"else",
":",
"self",
".",
"_section",
"=",
"self",
".",
"_OTHER_H_SECTION",
"elif",
"header_type",
"==",
"_POSSIBLE_MY_HEADER",
":",
"if",
"self",
".",
"_section",
"<=",
"self",
".",
"_MY_H_SECTION",
":",
"self",
".",
"_section",
"=",
"self",
".",
"_MY_H_SECTION",
"else",
":",
"# This will always be the fallback because we're not sure",
"# enough that the header is associated with this file.",
"self",
".",
"_section",
"=",
"self",
".",
"_OTHER_H_SECTION",
"else",
":",
"assert",
"header_type",
"==",
"_OTHER_HEADER",
"self",
".",
"_section",
"=",
"self",
".",
"_OTHER_H_SECTION",
"if",
"last_section",
"!=",
"self",
".",
"_section",
":",
"self",
".",
"_last_header",
"=",
"''",
"return",
"''"
] | https://github.com/p4lang/PI/blob/38d87e81253feff9fff0660d662c885be78fb719/tools/cpplint.py#L1185-L1242 | |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_misc.py | python | DateTime.ParseISOTime | (*args, **kwargs) | return _misc_.DateTime_ParseISOTime(*args, **kwargs) | ParseISOTime(self, String time) -> bool | ParseISOTime(self, String time) -> bool | [
"ParseISOTime",
"(",
"self",
"String",
"time",
")",
"-",
">",
"bool"
] | def ParseISOTime(*args, **kwargs):
"""ParseISOTime(self, String time) -> bool"""
return _misc_.DateTime_ParseISOTime(*args, **kwargs) | [
"def",
"ParseISOTime",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime_ParseISOTime",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L4142-L4144 | |
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | caffe2/python/rnn_cell.py | python | RNNCell.get_output_state_index | (self) | return 0 | Return index into state list of the "primary" step-wise output. | Return index into state list of the "primary" step-wise output. | [
"Return",
"index",
"into",
"state",
"list",
"of",
"the",
"primary",
"step",
"-",
"wise",
"output",
"."
] | def get_output_state_index(self):
'''
Return index into state list of the "primary" step-wise output.
'''
return 0 | [
"def",
"get_output_state_index",
"(",
"self",
")",
":",
"return",
"0"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/rnn_cell.py#L227-L231 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/req/req_uninstall.py | python | UninstallPathSet.remove | (self, auto_confirm=False, verbose=False) | Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True). | Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True). | [
"Remove",
"paths",
"in",
"self",
".",
"paths",
"with",
"confirmation",
"(",
"unless",
"auto_confirm",
"is",
"True",
")",
"."
] | def remove(self, auto_confirm=False, verbose=False):
# type: (bool, bool) -> None
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
dist_name_version = (
self.dist.project_name + "-" + self.dist.version
)
logger.info('Uninstalling %s:', dist_name_version)
with indent_log():
if auto_confirm or self._allowed_to_proceed(verbose):
moved = self._moved_paths
for_rename = compress_for_rename(self.paths)
for path in sorted(compact(for_rename)):
moved.stash(path)
logger.debug('Removing file or directory %s', path)
for pth in self.pth.values():
pth.remove()
logger.info('Successfully uninstalled %s', dist_name_version) | [
"def",
"remove",
"(",
"self",
",",
"auto_confirm",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"# type: (bool, bool) -> None",
"if",
"not",
"self",
".",
"paths",
":",
"logger",
".",
"info",
"(",
"\"Can't uninstall '%s'. No files were found to uninstall.\"",
",",
"self",
".",
"dist",
".",
"project_name",
",",
")",
"return",
"dist_name_version",
"=",
"(",
"self",
".",
"dist",
".",
"project_name",
"+",
"\"-\"",
"+",
"self",
".",
"dist",
".",
"version",
")",
"logger",
".",
"info",
"(",
"'Uninstalling %s:'",
",",
"dist_name_version",
")",
"with",
"indent_log",
"(",
")",
":",
"if",
"auto_confirm",
"or",
"self",
".",
"_allowed_to_proceed",
"(",
"verbose",
")",
":",
"moved",
"=",
"self",
".",
"_moved_paths",
"for_rename",
"=",
"compress_for_rename",
"(",
"self",
".",
"paths",
")",
"for",
"path",
"in",
"sorted",
"(",
"compact",
"(",
"for_rename",
")",
")",
":",
"moved",
".",
"stash",
"(",
"path",
")",
"logger",
".",
"debug",
"(",
"'Removing file or directory %s'",
",",
"path",
")",
"for",
"pth",
"in",
"self",
".",
"pth",
".",
"values",
"(",
")",
":",
"pth",
".",
"remove",
"(",
")",
"logger",
".",
"info",
"(",
"'Successfully uninstalled %s'",
",",
"dist_name_version",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/req/req_uninstall.py#L376-L406 | ||
turi-code/SFrame | 796b9bdfb2fa1b881d82080754643c7e68629cd2 | oss_src/unity/python/sframe/util/file_util.py | python | upload_to_local | (src_path, dst_path, is_dir=False, silent=False) | Copies a file/dir to a local path | Copies a file/dir to a local path | [
"Copies",
"a",
"file",
"/",
"dir",
"to",
"a",
"local",
"path"
] | def upload_to_local(src_path, dst_path, is_dir=False, silent=False):
'''Copies a file/dir to a local path'''
if not silent:
__logger__.info('Uploading local path %s to path: %s' % (src_path, dst_path))
if not os.path.exists(src_path):
raise RuntimeError("Cannot find file/path: %s" % src_path)
if not is_dir and not os.path.isfile(src_path):
raise RuntimeError("Path %s is not a file" % src_path)
if is_dir and not os.path.isdir(src_path):
raise RuntimeError("Path %s is not a directory" % src_path)
if not is_local_path(dst_path):
raise RuntimeError("Path %s is not a valid dest path" % dst_path)
# now upload
if is_dir:
shutil.copytree(src_path, dst_path)
else:
shutil.copy(src_path, dst_path)
if not silent:
__logger__.info("Successfully uploaded to path %s" % dst_path) | [
"def",
"upload_to_local",
"(",
"src_path",
",",
"dst_path",
",",
"is_dir",
"=",
"False",
",",
"silent",
"=",
"False",
")",
":",
"if",
"not",
"silent",
":",
"__logger__",
".",
"info",
"(",
"'Uploading local path %s to path: %s'",
"%",
"(",
"src_path",
",",
"dst_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"src_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Cannot find file/path: %s\"",
"%",
"src_path",
")",
"if",
"not",
"is_dir",
"and",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"src_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Path %s is not a file\"",
"%",
"src_path",
")",
"if",
"is_dir",
"and",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"src_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Path %s is not a directory\"",
"%",
"src_path",
")",
"if",
"not",
"is_local_path",
"(",
"dst_path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Path %s is not a valid dest path\"",
"%",
"dst_path",
")",
"# now upload",
"if",
"is_dir",
":",
"shutil",
".",
"copytree",
"(",
"src_path",
",",
"dst_path",
")",
"else",
":",
"shutil",
".",
"copy",
"(",
"src_path",
",",
"dst_path",
")",
"if",
"not",
"silent",
":",
"__logger__",
".",
"info",
"(",
"\"Successfully uploaded to path %s\"",
"%",
"dst_path",
")"
] | https://github.com/turi-code/SFrame/blob/796b9bdfb2fa1b881d82080754643c7e68629cd2/oss_src/unity/python/sframe/util/file_util.py#L137-L161 | ||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/lib2to3/fixer_util.py | python | ListComp | (xp, fp, it, test=None) | return Node(syms.atom,
[Leaf(token.LBRACE, "["),
inner,
Leaf(token.RBRACE, "]")]) | A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted. | A list comprehension of the form [xp for fp in it if test]. | [
"A",
"list",
"comprehension",
"of",
"the",
"form",
"[",
"xp",
"for",
"fp",
"in",
"it",
"if",
"test",
"]",
"."
] | def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = ""
fp.prefix = " "
it.prefix = " "
for_leaf = Leaf(token.NAME, "for")
for_leaf.prefix = " "
in_leaf = Leaf(token.NAME, "in")
in_leaf.prefix = " "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = " "
if_leaf = Leaf(token.NAME, "if")
if_leaf.prefix = " "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, "["),
inner,
Leaf(token.RBRACE, "]")]) | [
"def",
"ListComp",
"(",
"xp",
",",
"fp",
",",
"it",
",",
"test",
"=",
"None",
")",
":",
"xp",
".",
"prefix",
"=",
"\"\"",
"fp",
".",
"prefix",
"=",
"\" \"",
"it",
".",
"prefix",
"=",
"\" \"",
"for_leaf",
"=",
"Leaf",
"(",
"token",
".",
"NAME",
",",
"\"for\"",
")",
"for_leaf",
".",
"prefix",
"=",
"\" \"",
"in_leaf",
"=",
"Leaf",
"(",
"token",
".",
"NAME",
",",
"\"in\"",
")",
"in_leaf",
".",
"prefix",
"=",
"\" \"",
"inner_args",
"=",
"[",
"for_leaf",
",",
"fp",
",",
"in_leaf",
",",
"it",
"]",
"if",
"test",
":",
"test",
".",
"prefix",
"=",
"\" \"",
"if_leaf",
"=",
"Leaf",
"(",
"token",
".",
"NAME",
",",
"\"if\"",
")",
"if_leaf",
".",
"prefix",
"=",
"\" \"",
"inner_args",
".",
"append",
"(",
"Node",
"(",
"syms",
".",
"comp_if",
",",
"[",
"if_leaf",
",",
"test",
"]",
")",
")",
"inner",
"=",
"Node",
"(",
"syms",
".",
"listmaker",
",",
"[",
"xp",
",",
"Node",
"(",
"syms",
".",
"comp_for",
",",
"inner_args",
")",
"]",
")",
"return",
"Node",
"(",
"syms",
".",
"atom",
",",
"[",
"Leaf",
"(",
"token",
".",
"LBRACE",
",",
"\"[\"",
")",
",",
"inner",
",",
"Leaf",
"(",
"token",
".",
"RBRACE",
",",
"\"]\"",
")",
"]",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/lib2to3/fixer_util.py#L87-L109 | |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/nntplib.py | python | NNTP._statparse | (self, resp) | return resp, art_num, message_id | Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command. | Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command. | [
"Internal",
":",
"parse",
"the",
"response",
"line",
"of",
"a",
"STAT",
"NEXT",
"LAST",
"ARTICLE",
"HEAD",
"or",
"BODY",
"command",
"."
] | def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id | [
"def",
"_statparse",
"(",
"self",
",",
"resp",
")",
":",
"if",
"not",
"resp",
".",
"startswith",
"(",
"'22'",
")",
":",
"raise",
"NNTPReplyError",
"(",
"resp",
")",
"words",
"=",
"resp",
".",
"split",
"(",
")",
"art_num",
"=",
"int",
"(",
"words",
"[",
"1",
"]",
")",
"message_id",
"=",
"words",
"[",
"2",
"]",
"return",
"resp",
",",
"art_num",
",",
"message_id"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/nntplib.py#L716-L724 | |
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/contrib/slim/python/slim/data/parallel_reader.py | python | ParallelReader.__init__ | (self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None) | ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
reader_class(**read_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.TFRecordReader, common_queue)
common_queue = tf.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(readers, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...
num_readers: a integer, number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string]. | ParallelReader creates num_readers instances of the reader_class. | [
"ParallelReader",
"creates",
"num_readers",
"instances",
"of",
"the",
"reader_class",
"."
] | def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
reader_class(**read_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.TFRecordReader, common_queue)
common_queue = tf.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(readers, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...
num_readers: a integer, number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue | [
"def",
"__init__",
"(",
"self",
",",
"reader_class",
",",
"common_queue",
",",
"num_readers",
"=",
"4",
",",
"reader_kwargs",
"=",
"None",
")",
":",
"if",
"len",
"(",
"common_queue",
".",
"dtypes",
")",
"!=",
"2",
":",
"raise",
"TypeError",
"(",
"'common_queue.dtypes must be [tf.string, tf.string]'",
")",
"for",
"dtype",
"in",
"common_queue",
".",
"dtypes",
":",
"if",
"not",
"dtype",
".",
"is_compatible_with",
"(",
"tf_dtypes",
".",
"string",
")",
":",
"raise",
"TypeError",
"(",
"'common_queue.dtypes must be [tf.string, tf.string]'",
")",
"reader_kwargs",
"=",
"reader_kwargs",
"or",
"{",
"}",
"self",
".",
"_readers",
"=",
"[",
"reader_class",
"(",
"*",
"*",
"reader_kwargs",
")",
"for",
"_",
"in",
"range",
"(",
"num_readers",
")",
"]",
"self",
".",
"_common_queue",
"=",
"common_queue"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/slim/python/slim/data/parallel_reader.py#L38-L95 | ||
DLR-SC/tigl | d1c5901e948e33d10b1f9659ff3e22c4717b455f | bindings/bindings_generator/cheader_parser.py | python | Annotation.parse_string | (self, string) | Parses an annotion string for input and output arguments
#annotate in: 1,2 out: 3A(4), 5A(M) nohandle returns: error|value
the number in the annotation specifies the index of an argument
(counting from 0).
An "A" states, that the argument is an array
Brackets after an Array like 4A(1,2) mean, that the size of an
array is determinind by the product of the given arguments values.
In this case the array4 had a size arg1.value*arg2.value.
An M means, that the array is not allocated inside the wrapped function,
but has to be preallocated. The normally requires an additional argument
stating the size of the array. | Parses an annotion string for input and output arguments
#annotate in: 1,2 out: 3A(4), 5A(M) nohandle returns: error|value
the number in the annotation specifies the index of an argument
(counting from 0).
An "A" states, that the argument is an array
Brackets after an Array like 4A(1,2) mean, that the size of an
array is determinind by the product of the given arguments values.
In this case the array4 had a size arg1.value*arg2.value.
An M means, that the array is not allocated inside the wrapped function,
but has to be preallocated. The normally requires an additional argument
stating the size of the array. | [
"Parses",
"an",
"annotion",
"string",
"for",
"input",
"and",
"output",
"arguments",
"#annotate",
"in",
":",
"1",
"2",
"out",
":",
"3A",
"(",
"4",
")",
"5A",
"(",
"M",
")",
"nohandle",
"returns",
":",
"error|value",
"the",
"number",
"in",
"the",
"annotation",
"specifies",
"the",
"index",
"of",
"an",
"argument",
"(",
"counting",
"from",
"0",
")",
".",
"An",
"A",
"states",
"that",
"the",
"argument",
"is",
"an",
"array",
"Brackets",
"after",
"an",
"Array",
"like",
"4A",
"(",
"1",
"2",
")",
"mean",
"that",
"the",
"size",
"of",
"an",
"array",
"is",
"determinind",
"by",
"the",
"product",
"of",
"the",
"given",
"arguments",
"values",
".",
"In",
"this",
"case",
"the",
"array4",
"had",
"a",
"size",
"arg1",
".",
"value",
"*",
"arg2",
".",
"value",
".",
"An",
"M",
"means",
"that",
"the",
"array",
"is",
"not",
"allocated",
"inside",
"the",
"wrapped",
"function",
"but",
"has",
"to",
"be",
"preallocated",
".",
"The",
"normally",
"requires",
"an",
"additional",
"argument",
"stating",
"the",
"size",
"of",
"the",
"array",
"."
] | def parse_string(self, string):
"""
Parses an annotion string for input and output arguments
#annotate in: 1,2 out: 3A(4), 5A(M) nohandle returns: error|value
the number in the annotation specifies the index of an argument
(counting from 0).
An "A" states, that the argument is an array
Brackets after an Array like 4A(1,2) mean, that the size of an
array is determinind by the product of the given arguments values.
In this case the array4 had a size arg1.value*arg2.value.
An M means, that the array is not allocated inside the wrapped function,
but has to be preallocated. The normally requires an additional argument
stating the size of the array.
"""
#search output args
self.parse_param_group('out', string, self.outargs)
#search input args
self.parse_param_group('in', string, self.inargs)
#search if to use handle
res = re.search(r'\bnohandle\b|\bhandle\b', string)
if res:
self.uses_handle = res.group() != 'nohandle'
#search if function returns status error (or value)
res = re.search(r'\bnoerror\b', string)
if res:
self.returns_error = res.group() != 'noerror'
else:
self.returns_error = True
#check correctness
for inarg in self.inargs:
if inarg in self.outargs:
raise Exception('Input argument can not be an output ' +
'argument at the same time') | [
"def",
"parse_string",
"(",
"self",
",",
"string",
")",
":",
"#search output args",
"self",
".",
"parse_param_group",
"(",
"'out'",
",",
"string",
",",
"self",
".",
"outargs",
")",
"#search input args",
"self",
".",
"parse_param_group",
"(",
"'in'",
",",
"string",
",",
"self",
".",
"inargs",
")",
"#search if to use handle",
"res",
"=",
"re",
".",
"search",
"(",
"r'\\bnohandle\\b|\\bhandle\\b'",
",",
"string",
")",
"if",
"res",
":",
"self",
".",
"uses_handle",
"=",
"res",
".",
"group",
"(",
")",
"!=",
"'nohandle'",
"#search if function returns status error (or value)",
"res",
"=",
"re",
".",
"search",
"(",
"r'\\bnoerror\\b'",
",",
"string",
")",
"if",
"res",
":",
"self",
".",
"returns_error",
"=",
"res",
".",
"group",
"(",
")",
"!=",
"'noerror'",
"else",
":",
"self",
".",
"returns_error",
"=",
"True",
"#check correctness",
"for",
"inarg",
"in",
"self",
".",
"inargs",
":",
"if",
"inarg",
"in",
"self",
".",
"outargs",
":",
"raise",
"Exception",
"(",
"'Input argument can not be an output '",
"+",
"'argument at the same time'",
")"
] | https://github.com/DLR-SC/tigl/blob/d1c5901e948e33d10b1f9659ff3e22c4717b455f/bindings/bindings_generator/cheader_parser.py#L183-L221 | ||
zju3dv/clean-pvnet | 5870c509e3cc205e1bb28910a7b1a9a3c8add9a8 | lib/utils/pysixd/transform.py | python | affine_matrix_from_points | (v0, v1, shear=True, scale=True, usesvd=True) | return M | Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
By default the algorithm by Hartley and Zissermann [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix() | Return affine transform matrix to register two point sets. | [
"Return",
"affine",
"transform",
"matrix",
"to",
"register",
"two",
"point",
"sets",
"."
] | def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
By default the algorithm by Hartley and Zissermann [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M | [
"def",
"affine_matrix_from_points",
"(",
"v0",
",",
"v1",
",",
"shear",
"=",
"True",
",",
"scale",
"=",
"True",
",",
"usesvd",
"=",
"True",
")",
":",
"v0",
"=",
"numpy",
".",
"array",
"(",
"v0",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"copy",
"=",
"True",
")",
"v1",
"=",
"numpy",
".",
"array",
"(",
"v1",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"copy",
"=",
"True",
")",
"ndims",
"=",
"v0",
".",
"shape",
"[",
"0",
"]",
"if",
"ndims",
"<",
"2",
"or",
"v0",
".",
"shape",
"[",
"1",
"]",
"<",
"ndims",
"or",
"v0",
".",
"shape",
"!=",
"v1",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"input arrays are of wrong shape or type\"",
")",
"# move centroids to origin",
"t0",
"=",
"-",
"numpy",
".",
"mean",
"(",
"v0",
",",
"axis",
"=",
"1",
")",
"M0",
"=",
"numpy",
".",
"identity",
"(",
"ndims",
"+",
"1",
")",
"M0",
"[",
":",
"ndims",
",",
"ndims",
"]",
"=",
"t0",
"v0",
"+=",
"t0",
".",
"reshape",
"(",
"ndims",
",",
"1",
")",
"t1",
"=",
"-",
"numpy",
".",
"mean",
"(",
"v1",
",",
"axis",
"=",
"1",
")",
"M1",
"=",
"numpy",
".",
"identity",
"(",
"ndims",
"+",
"1",
")",
"M1",
"[",
":",
"ndims",
",",
"ndims",
"]",
"=",
"t1",
"v1",
"+=",
"t1",
".",
"reshape",
"(",
"ndims",
",",
"1",
")",
"if",
"shear",
":",
"# Affine transformation",
"A",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"v0",
",",
"v1",
")",
",",
"axis",
"=",
"0",
")",
"u",
",",
"s",
",",
"vh",
"=",
"numpy",
".",
"linalg",
".",
"svd",
"(",
"A",
".",
"T",
")",
"vh",
"=",
"vh",
"[",
":",
"ndims",
"]",
".",
"T",
"B",
"=",
"vh",
"[",
":",
"ndims",
"]",
"C",
"=",
"vh",
"[",
"ndims",
":",
"2",
"*",
"ndims",
"]",
"t",
"=",
"numpy",
".",
"dot",
"(",
"C",
",",
"numpy",
".",
"linalg",
".",
"pinv",
"(",
"B",
")",
")",
"t",
"=",
"numpy",
".",
"concatenate",
"(",
"(",
"t",
",",
"numpy",
".",
"zeros",
"(",
"(",
"ndims",
",",
"1",
")",
")",
")",
",",
"axis",
"=",
"1",
")",
"M",
"=",
"numpy",
".",
"vstack",
"(",
"(",
"t",
",",
"(",
"(",
"0.0",
",",
")",
"*",
"ndims",
")",
"+",
"(",
"1.0",
",",
")",
")",
")",
"elif",
"usesvd",
"or",
"ndims",
"!=",
"3",
":",
"# Rigid transformation via SVD of covariance matrix",
"u",
",",
"s",
",",
"vh",
"=",
"numpy",
".",
"linalg",
".",
"svd",
"(",
"numpy",
".",
"dot",
"(",
"v1",
",",
"v0",
".",
"T",
")",
")",
"# rotation matrix from SVD orthonormal bases",
"R",
"=",
"numpy",
".",
"dot",
"(",
"u",
",",
"vh",
")",
"if",
"numpy",
".",
"linalg",
".",
"det",
"(",
"R",
")",
"<",
"0.0",
":",
"# R does not constitute right handed system",
"R",
"-=",
"numpy",
".",
"outer",
"(",
"u",
"[",
":",
",",
"ndims",
"-",
"1",
"]",
",",
"vh",
"[",
"ndims",
"-",
"1",
",",
":",
"]",
"*",
"2.0",
")",
"s",
"[",
"-",
"1",
"]",
"*=",
"-",
"1.0",
"# homogeneous transformation matrix",
"M",
"=",
"numpy",
".",
"identity",
"(",
"ndims",
"+",
"1",
")",
"M",
"[",
":",
"ndims",
",",
":",
"ndims",
"]",
"=",
"R",
"else",
":",
"# Rigid transformation matrix via quaternion",
"# compute symmetric matrix N",
"xx",
",",
"yy",
",",
"zz",
"=",
"numpy",
".",
"sum",
"(",
"v0",
"*",
"v1",
",",
"axis",
"=",
"1",
")",
"xy",
",",
"yz",
",",
"zx",
"=",
"numpy",
".",
"sum",
"(",
"v0",
"*",
"numpy",
".",
"roll",
"(",
"v1",
",",
"-",
"1",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"1",
")",
"xz",
",",
"yx",
",",
"zy",
"=",
"numpy",
".",
"sum",
"(",
"v0",
"*",
"numpy",
".",
"roll",
"(",
"v1",
",",
"-",
"2",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"1",
")",
"N",
"=",
"[",
"[",
"xx",
"+",
"yy",
"+",
"zz",
",",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
",",
"[",
"yz",
"-",
"zy",
",",
"xx",
"-",
"yy",
"-",
"zz",
",",
"0.0",
",",
"0.0",
"]",
",",
"[",
"zx",
"-",
"xz",
",",
"xy",
"+",
"yx",
",",
"yy",
"-",
"xx",
"-",
"zz",
",",
"0.0",
"]",
",",
"[",
"xy",
"-",
"yx",
",",
"zx",
"+",
"xz",
",",
"yz",
"+",
"zy",
",",
"zz",
"-",
"xx",
"-",
"yy",
"]",
"]",
"# quaternion: eigenvector corresponding to most positive eigenvalue",
"w",
",",
"V",
"=",
"numpy",
".",
"linalg",
".",
"eigh",
"(",
"N",
")",
"q",
"=",
"V",
"[",
":",
",",
"numpy",
".",
"argmax",
"(",
"w",
")",
"]",
"q",
"/=",
"vector_norm",
"(",
"q",
")",
"# unit quaternion",
"# homogeneous transformation matrix",
"M",
"=",
"quaternion_matrix",
"(",
"q",
")",
"if",
"scale",
"and",
"not",
"shear",
":",
"# Affine transformation; scale is ratio of RMS deviations from centroid",
"v0",
"*=",
"v0",
"v1",
"*=",
"v1",
"M",
"[",
":",
"ndims",
",",
":",
"ndims",
"]",
"*=",
"math",
".",
"sqrt",
"(",
"numpy",
".",
"sum",
"(",
"v1",
")",
"/",
"numpy",
".",
"sum",
"(",
"v0",
")",
")",
"# move centroids back",
"M",
"=",
"numpy",
".",
"dot",
"(",
"numpy",
".",
"linalg",
".",
"inv",
"(",
"M1",
")",
",",
"numpy",
".",
"dot",
"(",
"M",
",",
"M0",
")",
")",
"M",
"/=",
"M",
"[",
"ndims",
",",
"ndims",
"]",
"return",
"M"
] | https://github.com/zju3dv/clean-pvnet/blob/5870c509e3cc205e1bb28910a7b1a9a3c8add9a8/lib/utils/pysixd/transform.py#L889-L995 | |
rsummers11/CADLab | 976ed959a0b5208bb4173127a7ef732ac73a9b6f | body_part_regressor/bodypartregressor/load_img.py | python | im_list_to_blob | (ims, use_max_size=True) | return blob | Convert a list of images into a network input. | Convert a list of images into a network input. | [
"Convert",
"a",
"list",
"of",
"images",
"into",
"a",
"network",
"input",
"."
] | def im_list_to_blob(ims, use_max_size=True):
"""Convert a list of images into a network input.
"""
# max_shape = np.array([im.shape for im in ims]).max(axis=0)
# min_shape = np.array([im.shape for im in ims]).min(axis=0)
# print max_shape, min_shape
if use_max_size:
max_shape = np.array([cfg.MAX_SIZE, cfg.MAX_SIZE])
else:
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, 3, max_shape[0], max_shape[1]),
dtype=np.float32)
for i in range(num_images):
im = ims[i]
m = (max_shape-im.shape)/2
for chn in range(3):
blob[i, chn, m[0]:m[0]+im.shape[0], m[1]:m[1]+im.shape[1]] = im
return blob | [
"def",
"im_list_to_blob",
"(",
"ims",
",",
"use_max_size",
"=",
"True",
")",
":",
"# max_shape = np.array([im.shape for im in ims]).max(axis=0)",
"# min_shape = np.array([im.shape for im in ims]).min(axis=0)",
"# print max_shape, min_shape",
"if",
"use_max_size",
":",
"max_shape",
"=",
"np",
".",
"array",
"(",
"[",
"cfg",
".",
"MAX_SIZE",
",",
"cfg",
".",
"MAX_SIZE",
"]",
")",
"else",
":",
"max_shape",
"=",
"np",
".",
"array",
"(",
"[",
"im",
".",
"shape",
"for",
"im",
"in",
"ims",
"]",
")",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"num_images",
"=",
"len",
"(",
"ims",
")",
"blob",
"=",
"np",
".",
"zeros",
"(",
"(",
"num_images",
",",
"3",
",",
"max_shape",
"[",
"0",
"]",
",",
"max_shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"i",
"in",
"range",
"(",
"num_images",
")",
":",
"im",
"=",
"ims",
"[",
"i",
"]",
"m",
"=",
"(",
"max_shape",
"-",
"im",
".",
"shape",
")",
"/",
"2",
"for",
"chn",
"in",
"range",
"(",
"3",
")",
":",
"blob",
"[",
"i",
",",
"chn",
",",
"m",
"[",
"0",
"]",
":",
"m",
"[",
"0",
"]",
"+",
"im",
".",
"shape",
"[",
"0",
"]",
",",
"m",
"[",
"1",
"]",
":",
"m",
"[",
"1",
"]",
"+",
"im",
".",
"shape",
"[",
"1",
"]",
"]",
"=",
"im",
"return",
"blob"
] | https://github.com/rsummers11/CADLab/blob/976ed959a0b5208bb4173127a7ef732ac73a9b6f/body_part_regressor/bodypartregressor/load_img.py#L62-L82 | |
klzgrad/naiveproxy | ed2c513637c77b18721fe428d7ed395b4d284c83 | src/build/find_depot_tools.py | python | add_depot_tools_to_path | () | return None | Search for depot_tools and add it to sys.path. | Search for depot_tools and add it to sys.path. | [
"Search",
"for",
"depot_tools",
"and",
"add",
"it",
"to",
"sys",
".",
"path",
"."
] | def add_depot_tools_to_path():
"""Search for depot_tools and add it to sys.path."""
# First, check if we have a DEPS'd in "depot_tools".
deps_depot_tools = os.path.join(SRC, 'third_party', 'depot_tools')
if IsRealDepotTools(deps_depot_tools):
# Put the pinned version at the start of the sys.path, in case there
# are other non-pinned versions already on the sys.path.
sys.path.insert(0, deps_depot_tools)
return deps_depot_tools
# Then look if depot_tools is already in PYTHONPATH.
for i in sys.path:
if i.rstrip(os.sep).endswith('depot_tools') and IsRealDepotTools(i):
return i
# Then look if depot_tools is in PATH, common case.
for i in os.environ['PATH'].split(os.pathsep):
if IsRealDepotTools(i):
sys.path.append(i.rstrip(os.sep))
return i
# Rare case, it's not even in PATH, look upward up to root.
root_dir = os.path.dirname(os.path.abspath(__file__))
previous_dir = os.path.abspath(__file__)
while root_dir and root_dir != previous_dir:
i = os.path.join(root_dir, 'depot_tools')
if IsRealDepotTools(i):
sys.path.append(i)
return i
previous_dir = root_dir
root_dir = os.path.dirname(root_dir)
print('Failed to find depot_tools', file=sys.stderr)
return None | [
"def",
"add_depot_tools_to_path",
"(",
")",
":",
"# First, check if we have a DEPS'd in \"depot_tools\".",
"deps_depot_tools",
"=",
"os",
".",
"path",
".",
"join",
"(",
"SRC",
",",
"'third_party'",
",",
"'depot_tools'",
")",
"if",
"IsRealDepotTools",
"(",
"deps_depot_tools",
")",
":",
"# Put the pinned version at the start of the sys.path, in case there",
"# are other non-pinned versions already on the sys.path.",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"deps_depot_tools",
")",
"return",
"deps_depot_tools",
"# Then look if depot_tools is already in PYTHONPATH.",
"for",
"i",
"in",
"sys",
".",
"path",
":",
"if",
"i",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
".",
"endswith",
"(",
"'depot_tools'",
")",
"and",
"IsRealDepotTools",
"(",
"i",
")",
":",
"return",
"i",
"# Then look if depot_tools is in PATH, common case.",
"for",
"i",
"in",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
":",
"if",
"IsRealDepotTools",
"(",
"i",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"i",
".",
"rstrip",
"(",
"os",
".",
"sep",
")",
")",
"return",
"i",
"# Rare case, it's not even in PATH, look upward up to root.",
"root_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"previous_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
"while",
"root_dir",
"and",
"root_dir",
"!=",
"previous_dir",
":",
"i",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"'depot_tools'",
")",
"if",
"IsRealDepotTools",
"(",
"i",
")",
":",
"sys",
".",
"path",
".",
"append",
"(",
"i",
")",
"return",
"i",
"previous_dir",
"=",
"root_dir",
"root_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"root_dir",
")",
"print",
"(",
"'Failed to find depot_tools'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"None"
] | https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/find_depot_tools.py#L29-L59 | |
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/distutils/sysconfig.py | python | parse_makefile | (fn, g=None) | return g | Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary. | Parse a Makefile-style file. | [
"Parse",
"a",
"Makefile",
"-",
"style",
"file",
"."
] | def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
if g is None:
g = {}
done = {}
notdone = {}
while True:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
# do variable interpolation here
while notdone:
for name in list(notdone):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if name.startswith('PY_') and name[3:] in renamed_variables:
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g | [
"def",
"parse_makefile",
"(",
"fn",
",",
"g",
"=",
"None",
")",
":",
"from",
"distutils",
".",
"text_file",
"import",
"TextFile",
"fp",
"=",
"TextFile",
"(",
"fn",
",",
"strip_comments",
"=",
"1",
",",
"skip_blanks",
"=",
"1",
",",
"join_lines",
"=",
"1",
",",
"errors",
"=",
"\"surrogateescape\"",
")",
"if",
"g",
"is",
"None",
":",
"g",
"=",
"{",
"}",
"done",
"=",
"{",
"}",
"notdone",
"=",
"{",
"}",
"while",
"True",
":",
"line",
"=",
"fp",
".",
"readline",
"(",
")",
"if",
"line",
"is",
"None",
":",
"# eof",
"break",
"m",
"=",
"_variable_rx",
".",
"match",
"(",
"line",
")",
"if",
"m",
":",
"n",
",",
"v",
"=",
"m",
".",
"group",
"(",
"1",
",",
"2",
")",
"v",
"=",
"v",
".",
"strip",
"(",
")",
"# `$$' is a literal `$' in make",
"tmpv",
"=",
"v",
".",
"replace",
"(",
"'$$'",
",",
"''",
")",
"if",
"\"$\"",
"in",
"tmpv",
":",
"notdone",
"[",
"n",
"]",
"=",
"v",
"else",
":",
"try",
":",
"v",
"=",
"int",
"(",
"v",
")",
"except",
"ValueError",
":",
"# insert literal `$'",
"done",
"[",
"n",
"]",
"=",
"v",
".",
"replace",
"(",
"'$$'",
",",
"'$'",
")",
"else",
":",
"done",
"[",
"n",
"]",
"=",
"v",
"# Variables with a 'PY_' prefix in the makefile. These need to",
"# be made available without that prefix through sysconfig.",
"# Special care is needed to ensure that variable expansion works, even",
"# if the expansion uses the name without a prefix.",
"renamed_variables",
"=",
"(",
"'CFLAGS'",
",",
"'LDFLAGS'",
",",
"'CPPFLAGS'",
")",
"# do variable interpolation here",
"while",
"notdone",
":",
"for",
"name",
"in",
"list",
"(",
"notdone",
")",
":",
"value",
"=",
"notdone",
"[",
"name",
"]",
"m",
"=",
"_findvar1_rx",
".",
"search",
"(",
"value",
")",
"or",
"_findvar2_rx",
".",
"search",
"(",
"value",
")",
"if",
"m",
":",
"n",
"=",
"m",
".",
"group",
"(",
"1",
")",
"found",
"=",
"True",
"if",
"n",
"in",
"done",
":",
"item",
"=",
"str",
"(",
"done",
"[",
"n",
"]",
")",
"elif",
"n",
"in",
"notdone",
":",
"# get it on a subsequent round",
"found",
"=",
"False",
"elif",
"n",
"in",
"os",
".",
"environ",
":",
"# do it like make: fall back to environment",
"item",
"=",
"os",
".",
"environ",
"[",
"n",
"]",
"elif",
"n",
"in",
"renamed_variables",
":",
"if",
"name",
".",
"startswith",
"(",
"'PY_'",
")",
"and",
"name",
"[",
"3",
":",
"]",
"in",
"renamed_variables",
":",
"item",
"=",
"\"\"",
"elif",
"'PY_'",
"+",
"n",
"in",
"notdone",
":",
"found",
"=",
"False",
"else",
":",
"item",
"=",
"str",
"(",
"done",
"[",
"'PY_'",
"+",
"n",
"]",
")",
"else",
":",
"done",
"[",
"n",
"]",
"=",
"item",
"=",
"\"\"",
"if",
"found",
":",
"after",
"=",
"value",
"[",
"m",
".",
"end",
"(",
")",
":",
"]",
"value",
"=",
"value",
"[",
":",
"m",
".",
"start",
"(",
")",
"]",
"+",
"item",
"+",
"after",
"if",
"\"$\"",
"in",
"after",
":",
"notdone",
"[",
"name",
"]",
"=",
"value",
"else",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"except",
"ValueError",
":",
"done",
"[",
"name",
"]",
"=",
"value",
".",
"strip",
"(",
")",
"else",
":",
"done",
"[",
"name",
"]",
"=",
"value",
"del",
"notdone",
"[",
"name",
"]",
"if",
"name",
".",
"startswith",
"(",
"'PY_'",
")",
"and",
"name",
"[",
"3",
":",
"]",
"in",
"renamed_variables",
":",
"name",
"=",
"name",
"[",
"3",
":",
"]",
"if",
"name",
"not",
"in",
"done",
":",
"done",
"[",
"name",
"]",
"=",
"value",
"else",
":",
"# bogus variable reference; just drop it since we can't deal",
"del",
"notdone",
"[",
"name",
"]",
"fp",
".",
"close",
"(",
")",
"# strip spurious spaces",
"for",
"k",
",",
"v",
"in",
"done",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"done",
"[",
"k",
"]",
"=",
"v",
".",
"strip",
"(",
")",
"# save the results in the global dictionary",
"g",
".",
"update",
"(",
"done",
")",
"return",
"g"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/distutils/sysconfig.py#L300-L403 | |
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | python/mxnet/gluon/data/dataloader.py | python | ConnectionWrapper.__getattr__ | (self, name) | return getattr(attr, name) | Emmulate conn | Emmulate conn | [
"Emmulate",
"conn"
] | def __getattr__(self, name):
"""Emmulate conn"""
attr = self.__dict__.get('_conn', None)
return getattr(attr, name) | [
"def",
"__getattr__",
"(",
"self",
",",
"name",
")",
":",
"attr",
"=",
"self",
".",
"__dict__",
".",
"get",
"(",
"'_conn'",
",",
"None",
")",
"return",
"getattr",
"(",
"attr",
",",
"name",
")"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/gluon/data/dataloader.py#L92-L95 | |
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/i-pi/ipi/inputs/normalmodes.py | python | InputNormalModes.fetch | (self) | return NormalModes(self.mode.fetch(), self.transform.fetch(), super(InputNormalModes,self).fetch() ) | Creates a normal modes object.
Returns:
A normal modes object. | Creates a normal modes object. | [
"Creates",
"a",
"normal",
"modes",
"object",
"."
] | def fetch(self):
"""Creates a normal modes object.
Returns:
A normal modes object.
"""
super(InputNormalModes,self).check()
return NormalModes(self.mode.fetch(), self.transform.fetch(), super(InputNormalModes,self).fetch() ) | [
"def",
"fetch",
"(",
"self",
")",
":",
"super",
"(",
"InputNormalModes",
",",
"self",
")",
".",
"check",
"(",
")",
"return",
"NormalModes",
"(",
"self",
".",
"mode",
".",
"fetch",
"(",
")",
",",
"self",
".",
"transform",
".",
"fetch",
"(",
")",
",",
"super",
"(",
"InputNormalModes",
",",
"self",
")",
".",
"fetch",
"(",
")",
")"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/inputs/normalmodes.py#L76-L84 | |
openthread/openthread | 9fcdbed9c526c70f1556d1ed84099c1535c7cd32 | tools/harness-thci/OpenThread_BR.py | python | SerialHandle.bash | (self, cmd, timeout=10) | Execute the command in bash. | Execute the command in bash. | [
"Execute",
"the",
"command",
"in",
"bash",
"."
] | def bash(self, cmd, timeout=10):
"""
Execute the command in bash.
"""
self.__bashClearLines()
self.__bashWriteLine(cmd)
self.__bashExpect(cmd, timeout=timeout, endswith=True)
response = []
deadline = time.time() + timeout
while time.time() < deadline:
line = self.__bashReadLine()
if line is None:
time.sleep(0.01)
continue
if line == RPI_FULL_PROMPT:
# return response lines without prompt
return response
response.append(line)
self.__bashWrite('\x03')
raise Exception('%s: failed to find end of response' % self.port) | [
"def",
"bash",
"(",
"self",
",",
"cmd",
",",
"timeout",
"=",
"10",
")",
":",
"self",
".",
"__bashClearLines",
"(",
")",
"self",
".",
"__bashWriteLine",
"(",
"cmd",
")",
"self",
".",
"__bashExpect",
"(",
"cmd",
",",
"timeout",
"=",
"timeout",
",",
"endswith",
"=",
"True",
")",
"response",
"=",
"[",
"]",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"timeout",
"while",
"time",
".",
"time",
"(",
")",
"<",
"deadline",
":",
"line",
"=",
"self",
".",
"__bashReadLine",
"(",
")",
"if",
"line",
"is",
"None",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"continue",
"if",
"line",
"==",
"RPI_FULL_PROMPT",
":",
"# return response lines without prompt",
"return",
"response",
"response",
".",
"append",
"(",
"line",
")",
"self",
".",
"__bashWrite",
"(",
"'\\x03'",
")",
"raise",
"Exception",
"(",
"'%s: failed to find end of response'",
"%",
"self",
".",
"port",
")"
] | https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/harness-thci/OpenThread_BR.py#L169-L193 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.